diff --git a/.gitignore b/.gitignore index c111936..ee0c04c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,26 +1,15 @@ -# Copyright (c) 2017 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - .DS_Store -# Eclipse .classpath .project -# Gogland .idea/ -# Blast bin/ dist/ + +*.pem +*.csr + +cover.out +cover.html diff --git a/CHANGES.md b/CHANGES.md index 5eb9607..4c9c85a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,63 +5,92 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). 
- ## [Unreleased] +- Dockerfile fixes #148 -### Added +## [v0.10.0] +- Upgrade Bleve #145 +- Fix typo in README.md #142 -### Changed +## [v0.9.1] + +- Update tests #139 +- Update protocol buffers #135 +- Update zap #134 +- Update gRPC #133 +- Update raft #132 +- Update tests #131 +- Upgrade Bleve to v1.0.9 #130 +- Add test #129 + +## [v0.9.0] + +- Implement CORS #128 +- Delete the experimentally implemented feature for distributed search #127 +- Add coverage to Makefile #114 +- Docker compose #119 +- Bump Bleve version to v0.8.1 #117 + + +## [v0.8.1] + +- Update go version and dependencies #109 -## [v0.7.1] - 2019-07-18 -### Added +## [v0.8.0] + +- Add swagger specification experimentaly #107 +- New CLI #82 +- Split protobuf into components #84 +- Change subcommands #85 +- Update protobuf #86 +- Change protobuf #87 +- Change the cluster watching method #90 +- Change cluster watch command for manager #92 +- Change node state to enum from string #93 +- Change node info structure #94 +- Change protobuf for indexer and dispatcher #95 +- Change server arguments #96 +- Change index protobuf #97 +- Use protobuf document #98 +- Change node state to Node_SHUTDOWN in a error #99 +- Fix a bug for waiting to receive an indexer cluster updates from the stream #100 +- Migrate to grpc-gateway #105 + + +## [v0.7.1] - 2019-07-18 - Add raft-badger #69 - Add raft-storage-type flag #73 - Add gRPC access logger #74 - -### Changed - - Improve indexing performance #71 - Remove original document #72 - Rename config package to builtins #75 -## [v0.7.0] - 2019-07-03 -### Added +## [v0.7.0] - 2019-07-03 - Add GEO search example #65 - -### Changed - - Migrate grpc-middleware #68 -## [v0.6.1] - 2019-06-21 - -### Added -### Changed +## [v0.6.1] - 2019-06-21 - Fix HTTP response into JSON format #64 - Update Dockerfile #62 -## [v0.6.0] - 2019-06-19 -### Added +## [v0.6.0] - 2019-06-19 - Add federated search #30 - Add cluster manager (#48) - Add KVS HTTP handlers #46 - -### Changed - - Update http 
logger #51 - Update logutils (#50) - Remve KVS (#49) -## [v0.5.0] - 2019-03-22 -### Added +## [v0.5.0] - 2019-03-22 - Support bulk update #41 - Support Badger #38 @@ -71,9 +100,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Add logging #33 - Add CHANGES.md #29 - Add error handling for server startup #28. - -### Changed - - Fixed some badger bugs #40 - Restructure store package #36 - Update examples #32 @@ -82,6 +108,4 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [v0.4.0] - 2019-03-14 -### Changed - - Code refactoring. diff --git a/Dockerfile b/Dockerfile index 06bdb1b..08f4a1d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,4 @@ -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM golang:1.12.1-stretch +FROM golang:1.15.6-buster ARG VERSION @@ -23,18 +9,18 @@ COPY . 
${GOPATH}/src/github.com/mosuka/blast RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ echo "deb-src http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - git \ - golang \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev \ - gcc-4.8 \ - g++-4.8 \ - build-essential && \ + git \ + # golang \ + libicu-dev \ + libstemmer-dev \ + gcc-4.8 \ + g++-4.8 \ + build-essential && \ apt-get clean && \ - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ - update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ + #update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ + #update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 && \ update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 && \ go get -u -v github.com/blevesearch/cld2 && \ @@ -44,30 +30,29 @@ RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> ./compile_libs.sh && \ cp *.so /usr/local/lib && \ cd ${GOPATH}/src/github.com/mosuka/blast && \ - make \ - GOOS=linux \ - GOARCH=amd64 \ - CGO_ENABLED=1 \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - VERSION="${VERSION}" \ - build + make GOOS=linux \ + GOARCH=amd64 \ + CGO_ENABLED=1 \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + VERSION="${VERSION}" \ + build -FROM debian:stretch-slim +FROM debian:buster-slim MAINTAINER Minoru Osuka "minoru.osuka@gmail.com" RUN apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev && \ - apt-get clean + libicu-dev \ + libstemmer-dev && \ + apt-get clean && \ + rm -rf /var/cache/apk/* COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/lib/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ 
-COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 5000 5001 5002 +EXPOSE 7000 8000 9000 -ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] -CMD [ "blastd", "--help" ] +ENTRYPOINT [ "/usr/bin/blast" ] +CMD [ "start" ] diff --git a/Makefile b/Makefile index 484515f..a47d17d 100644 --- a/Makefile +++ b/Makefile @@ -1,34 +1,29 @@ -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -GOOS ?= linux -GOARCH ?= amd64 +GOOS ?= +GOARCH ?= +GO111MODULE ?= on CGO_ENABLED ?= 0 CGO_CFLAGS ?= CGO_LDFLAGS ?= -BUILD_TAGS ?= -DOCKER_REPOSITORY ?= mosuka +BUILD_TAGS ?= kagome VERSION ?= BIN_EXT ?= - -GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_CFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) GO111MODULE=on go +DOCKER_REPOSITORY ?= mosuka PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) -TARGET_PACKAGES = $(shell find . 
-name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) +TARGET_PACKAGES = $(shell find $(CURDIR) -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) + +GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) + +ifeq ($(GOOS),) + GOOS = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$1}') +endif + +ifeq ($(GOARCH),) + GOARCH = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$2}') +endif ifeq ($(VERSION),) VERSION = latest @@ -39,71 +34,83 @@ ifeq ($(GOOS),windows) BIN_EXT = .exe endif +GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_CFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) GO111MODULE=$(GO111MODULE) go + .DEFAULT_GOAL := build +.PHONY: show-env +show-env: + @echo ">> show env" + @echo " GOOS = $(GOOS)" + @echo " GOARCH = $(GOARCH)" + @echo " GO111MODULE = $(GO111MODULE)" + @echo " CGO_ENABLED = $(CGO_ENABLED)" + @echo " CGO_CFLAGS = $(CGO_CFLAGS)" + @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" + @echo " BUILD_TAGS = $(BUILD_TAGS)" + @echo " VERSION = $(VERSION)" + @echo " BIN_EXT = $(BIN_EXT)" + @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" + @echo " LDFLAGS = $(LDFLAGS)" + @echo " PACKAGES = $(PACKAGES)" + @echo " PROTOBUFS = $(PROTOBUFS)" + @echo " TARGET_PACKAGES = $(TARGET_PACKAGES)" + @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" + .PHONY: protoc -protoc: +protoc: show-env @echo ">> generating proto3 code" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done .PHONY: format -format: +format: show-env @echo ">> formatting code" - @$(GO) fmt $(PACKAGES) + $(GO) fmt $(PACKAGES) .PHONY: test -test: +test: show-env @echo ">> testing all packages" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @$(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) + $(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) + +.PHONY: coverage +coverage: show-env + @echo ">> checking coverage of all packages" + $(GO) test -coverprofile=./cover.out -tags="$(BUILD_TAGS)" $(PACKAGES) + $(GO) tool cover -html=cover.out -o cover.html + +.PHONY: clean +clean: show-env + @echo ">> cleaning binaries" + rm -rf ./bin + rm -rf ./data + rm -rf ./dist .PHONY: build -build: +build: show-env @echo ">> building binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done .PHONY: install 
-install: +install: show-env @echo ">> installing binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done .PHONY: dist -dist: +dist: show-env @echo ">> packaging binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" mkdir -p ./dist/$(GOOS)-$(GOARCH)/bin - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done (cd ./dist/$(GOOS)-$(GOARCH); tar zcfv ../blast-${VERSION}.$(GOOS)-$(GOARCH).tar.gz .) 
-.PHONY: git-tag -git-tag: +.PHONY: list-tag +list-tag: + @echo ">> listing github tags" + git tag -l --sort=-v:refname + +.PHONY: tag +tag: show-env @echo ">> tagging github" - @echo " VERSION = $(VERSION)" ifeq ($(VERSION),$(filter $(VERSION),latest master "")) @echo "please specify VERSION" else @@ -112,24 +119,22 @@ else endif .PHONY: docker-build -docker-build: +docker-build: show-env @echo ">> building docker container image" - @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" - @echo " VERSION = $(VERSION)" docker build -t $(DOCKER_REPOSITORY)/blast:latest --build-arg VERSION=$(VERSION) . docker tag $(DOCKER_REPOSITORY)/blast:latest $(DOCKER_REPOSITORY)/blast:$(VERSION) .PHONY: docker-push -docker-push: +docker-push: show-env @echo ">> pushing docker container image" - @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" - @echo " VERSION = $(VERSION)" docker push $(DOCKER_REPOSITORY)/blast:latest docker push $(DOCKER_REPOSITORY)/blast:$(VERSION) -.PHONY: clean -clean: - @echo ">> cleaning binaries" - rm -rf ./bin - rm -rf ./data - rm -rf ./dist +.PHONY: docker-clean +docker-clean: show-env + docker rmi -f $(shell docker images --filter "dangling=true" -q --no-trunc) + +.PHONY: cert +cert: show-env + @echo ">> generating certification" + openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' diff --git a/README.md b/README.md index 11b9d81..c2e7dc7 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,7 @@ - # Blast @@ -31,22 +17,21 @@ Blast makes it easy for programmers to develop search applications with advanced - Faceted search - Spatial/Geospatial search - Search result highlighting -- Distributed search/indexing - Index replication - Bringing up cluster -- Cluster Federation - An easy-to-use HTTP API - CLI is available - Docker container image is available -## Installing dependencies +## Install build dependencies Blast requires some C/C++ libraries if you need to enable cld2, icu, 
libstemmer or leveldb. The following sections are instructions for satisfying dependencies on particular platforms. ### Ubuntu 18.10 ```bash +$ sudo apt-get update $ sudo apt-get install -y \ libicu-dev \ libstemmer-dev \ @@ -71,7 +56,6 @@ $ ./compile_libs.sh $ sudo cp *.so /usr/local/lib ``` - ### macOS High Sierra Version 10.13.6 ```bash @@ -90,66 +74,73 @@ $ sudo cp *.so /usr/local/lib ``` -## Building Blast +## Build -When you satisfied dependencies, let's build Blast for Linux as following: +Building Blast as following: ```bash $ mkdir -p ${GOPATH}/src/github.com/mosuka $ cd ${GOPATH}/src/github.com/mosuka $ git clone https://github.com/mosuka/blast.git $ cd blast -$ make build +$ make ``` -If you want to build for other platform, set `GOOS`, `GOARCH` environment variables. For example, build for macOS like following: +If you omit `GOOS` or `GOARCH`, it will build the binary of the platform you are using. +If you want to specify the target platform, please set `GOOS` and `GOARCH` environment variables. + +### Linux ```bash -$ make \ - GOOS=darwin \ - build +$ make GOOS=linux build ``` -Blast supports some [Bleve Extensions (blevex)](https://github.com/blevesearch/blevex). If you want to build with them, please set `CGO_LDFLAGS`, `CGO_CFLAGS`, `CGO_ENABLED` and `BUILD_TAGS`. For example, build LevelDB to be available for index storage as follows: +### macOS ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS=leveldb \ - CGO_ENABLED=1 \ - build +$ make GOOS=darwin build ``` -You can enable all the Bleve extensions supported by Blast as follows: +### Windows +```bash +$ make GOOS=windows build +``` + +## Build with extensions -### Linux +Blast supports some Bleve Extensions (blevex). If you want to build with them, please set CGO_LDFLAGS, CGO_CFLAGS, CGO_ENABLED and BUILD_TAGS. 
For example, build LevelDB to be available for index storage as follows: ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - build +$ make GOOS=linux \ + BUILD_TAGS=icu \ + CGO_ENABLED=1 \ + build ``` - -#### macOS +### Linux ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - build +$ make GOOS=linux \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + build ``` +### macOS + +```bash +$ make GOOS=darwin \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ + CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ + build +``` ### Build flags -Please refer to the following table for details of Bleve Extensions: +Refer to the following table for the build flags of the supported Bleve extensions: | BUILD_TAGS | CGO_ENABLED | Description | | ---------- | ----------- | ----------- | @@ -157,702 +148,505 @@ Please refer to the following table for details of Bleve Extensions: | kagome | 0 | Enable Japanese Language Analyser | | icu | 1 | Enable ICU Tokenizer, Thai Language Analyser | | libstemmer | 1 | Enable Language Stemmer (Danish, German, English, Spanish, Finnish, French, Hungarian, Italian, Dutch, Norwegian, Portuguese, Romanian, Russian, Swedish, Turkish) | -| cznicb | 0 | Enable cznicb KV store | -| leveldb | 1 | Enable LevelDB | -| badger | 0 | Enable Badger (This feature is considered experimental) | -If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Installing dependencies section above. +If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Install build dependencies section above. 
-### Binaries +## Binary You can see the binary file when build successful like so: ```bash $ ls ./bin -blast blastd +blast ``` -## Testing Blast +## Test If you want to test your changes, run command like following: ```bash -$ make \ - test +$ make test ``` -You can test with all the Bleve extensions supported by Blast as follows: +If you want to specify the target platform, set `GOOS` and `GOARCH` environment variables in the same way as the build. -### Linux +## Package + +To create a distribution package, run the following command: ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - test +$ make dist ``` -#### macOS +## Configure + +Blast can change its startup options with configuration files, environment variables, and command line arguments. +Refer to the following table for the options that can be configured. + +| CLI Flag | Environment variable | Configuration File | Description | +| --- | --- | --- | --- | +| --config-file | - | - | config file. 
if omitted, blast.yaml in /etc and home directory will be searched | +| --id | BLAST_ID | id | node ID | +| --raft-address | BLAST_RAFT_ADDRESS | raft_address | Raft server listen address | +| --grpc-address | BLAST_GRPC_ADDRESS | grpc_address | gRPC server listen address | +| --http-address | BLAST_HTTP_ADDRESS | http_address | HTTP server listen address | +| --data-directory | BLAST_DATA_DIRECTORY | data_directory | data directory which store the index and Raft logs | +| --mapping-file | BLAST_MAPPING_FILE | mapping_file | path to the index mapping file | +| --peer-grpc-address | BLAST_PEER_GRPC_ADDRESS | peer_grpc_address | listen address of the existing gRPC server in the joining cluster | +| --certificate-file | BLAST_CERTIFICATE_FILE | certificate_file | path to the client server TLS certificate file | +| --key-file | BLAST_KEY_FILE | key_file | path to the client server TLS key file | +| --common-name | BLAST_COMMON_NAME | common_name | certificate common name | +| --cors-allowed-methods | BLAST_CORS_ALLOWED_METHODS | cors_allowed_methods | CORS allowed methods (ex: GET,PUT,DELETE,POST) | +| --cors-allowed-origins | BLAST_CORS_ALLOWED_ORIGINS | cors_allowed_origins | CORS allowed origins (ex: http://localhost:8080,http://localhost:80) | +| --cors-allowed-headers | BLAST_CORS_ALLOWED_HEADERS | cors_allowed_headers | CORS allowed headers (ex: content-type,x-some-key) | +| --log-level | BLAST_LOG_LEVEL | log_level | log level | +| --log-file | BLAST_LOG_FILE | log_file | log file | +| --log-max-size | BLAST_LOG_MAX_SIZE | log_max_size | max size of a log file in megabytes | +| --log-max-backups | BLAST_LOG_MAX_BACKUPS | log_max_backups | max backup count of log files | +| --log-max-age | BLAST_LOG_MAX_AGE | log_max_age | max age of a log file in days | +| --log-compress | BLAST_LOG_COMPRESS | log_compress | compress a log file | + + +## Start + +Starting server is easy as follows: ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb 
leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - test +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + --grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./examples/example_mapping.json ``` +You can get the node information with the following command: -## Packaging Blast +```bash +$ ./bin/blast node | jq . +``` -### Linux +or the following URL: ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - dist +$ curl -X GET http://localhost:8000/v1/node | jq . ``` +The result of the above command is: + +```json +{ + "node": { + "raft_address": ":7000", + "metadata": { + "grpc_address": ":9000", + "http_address": ":8000" + }, + "state": "Leader" + } +} +``` -#### macOS +## Health check + +You can check the health status of the node. ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - dist +$ ./bin/blast healthcheck | jq . ``` +Also provides the following REST APIs + +### Liveness prove +This endpoint always returns 200 and should be used to check server health. -## Starting Blast in standalone mode +```bash +$ curl -X GET http://localhost:8000/v1/liveness_check | jq . +``` -![standalone](https://user-images.githubusercontent.com/970948/59768879-138f5180-92e0-11e9-8b33-c7b1a93e0893.png) +### Readiness probe -Running a Blast in standalone mode is easy. Start a indexer like so: +This endpoint returns 200 when server is ready to serve traffic (i.e. respond to queries). 
```bash -$ ./bin/blastd \ - indexer \ - --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ curl -X GET http://localhost:8000/v1/readiness_check | jq . ``` -Please refer to following document for details of index mapping: -- http://blevesearch.com/docs/Terminology/ -- http://blevesearch.com/docs/Text-Analysis/ -- http://blevesearch.com/docs/Index-Mapping/ -- https://github.com/blevesearch/bleve/blob/master/mapping/index.go#L43 +## Put a document -You can now put, get, search and delete the documents via CLI. +To put a document, execute the following command: +```bash +$ ./bin/blast set 1 ' +{ + "fields": { + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "example" + } +} +' | jq . +``` -### Indexing a document via CLI - -For document indexing, execute the following command: +or, you can use the RESTful API as follows: ```bash -$ cat ./example/wiki_doc_enwiki_1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 enwiki_1 +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' --data-binary ' +{ + "fields": { + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "example" + } +} +' | jq . ``` -You can see the result in JSON format. The result of the above command is: +or ```bash -1 +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' -H "Content-Type: application/json" --data-binary @./examples/example_doc_1.json ``` +## Get a document + +To get a document, execute the following command: -### Getting a document via CLI +```bash +$ ./bin/blast get 1 | jq . +``` -Getting a document is as following: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast get document --grpc-addr=:5001 enwiki_1 +$ curl -X GET 'http://127.0.0.1:8000/v1/documents/1' | jq . ``` -You can see the result in JSON format. The result of the above command is: +You can see the result. The result of the above command is: ```json { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } } ``` +## Search documents -### Searching documents via CLI - -Searching documents is as like following: +To search documents, execute the following command: ```bash -$ cat ./example/wiki_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:5001 +$ ./bin/blast search ' +{ + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } +} +' | jq . ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json +```bash +$ curl -X POST 'http://127.0.0.1:8000/v1/search' --data-binary ' { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { + "search_request": { "query": { "query": "+_all:search" }, "size": 10, "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - }, "fields": [ "*" ], - "facets": { - "Contributor count": { - "size": 10, - "field": "contributor" - }, - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } - ] - } - }, - "explain": false, "sort": [ "-_score" - ], - "includeLocations": false - }, - "hits": [ - { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - 
"search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, - { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null - }, - { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null - } - ] + ] + } +} +' | jq . +``` + +You can see the result. The result of the above command is: + +```json +{ + "search_result": { + "facets": null, + "hits": [ + { + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] - } + "id": "1", + "index": "/tmp/blast/node1/index", + "score": 0.09703538256409851, + "sort": [ + "_score" + ] + } + ], + "max_score": 0.09703538256409851, + "request": { + "explain": false, + "facets": null, + "fields": [ + "*" + ], + "from": 0, + "highlight": null, + "includeLocations": false, + "query": { + "query": "+_all:search" }, + "search_after": null, + "search_before": null, + "size": 10, "sort": [ - "_score" - ], - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } - } - ], - "total_hits": 1, - "max_score": 0.09703538256409851, - "took": 201951, - "facets": { - "Contributor count": { - "field": "contributor", - "total": 0, - "missing": 1, - "other": 0 - }, - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } + "-_score" ] - } + }, + "status": { + "failed": 0, + "successful": 1, + "total": 1 + }, + "took": 171880, + "total_hits": 1 } } ``` -Please refer to following document for details of search request and result: -- http://blevesearch.com/docs/Query/ -- http://blevesearch.com/docs/Query-String-Query/ -- http://blevesearch.com/docs/Sorting/ -- https://github.com/blevesearch/bleve/blob/master/search.go#L267 -- https://github.com/blevesearch/bleve/blob/master/search.go#L443 - - -### Deleting a document via CLI +## Delete a document -Deleting a document is as following: +Deleting a document, execute the following command: ```bash -$ ./bin/blast delete document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast delete 1 ``` -You can see the result in JSON format. 
The result of the above command is: +or, you can use the RESTful API as follows: ```bash -1 +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents/1' ``` +## Index documents in bulk -### Indexing documents in bulk via CLI - -Indexing documents in bulk, run the following command: +To index documents in bulk, execute the following command: ```bash -$ cat ./example/wiki_bulk_index.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 +$ ./bin/blast bulk-index --file ./examples/example_bulk_index.json ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: ```bash -4 +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: application/x-ndjson" --data-binary @./examples/example_bulk_index.json ``` +## Delete documents in bulk -### Deleting documents in bulk via CLI - -Deleting documents in bulk, run the following command: +To delete documents in bulk, execute the following command: ```bash -$ cat ./example/wiki_bulk_delete.json | xargs -0 ./bin/blast delete document --grpc-addr=:5001 +$ ./bin/blast bulk-delete --file ./examples/example_bulk_delete.txt ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: ```bash -4 +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: text/plain" --data-binary @./examples/example_bulk_delete.txt ``` +## Bringing up a cluster -## Using HTTP REST API - -Also you can do above commands via HTTP REST API that listened port 5002. - - -### Indexing a document via HTTP REST API - -Indexing a document via HTTP is as following: +Blast is easy to bring up the cluster. the node is already running, but that is not fault tolerant. 
If you need to increase the fault tolerance, bring up 2 more data nodes like so: ```bash -$ curl -X PUT 'http://127.0.0.1:5002/documents/enwiki_1' -d @./example/wiki_doc_enwiki_1.json +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./examples/example_mapping.json ``` - -### Getting a document via HTTP REST API - -Getting a document via HTTP is as following: - ```bash -$ curl -X GET 'http://127.0.0.1:5002/documents/enwiki_1' +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./examples/example_mapping.json ``` -### Searching documents via HTTP REST API +_Above example shows each Blast node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ -Searching documents via HTTP is as following: +This instructs each new node to join an existing node, each node recognizes the joining clusters when started. +So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the cluster with the following command: ```bash -$ curl -X POST 'http://127.0.0.1:5002/search' -d @./example/wiki_search_request.json +$ ./bin/blast cluster | jq . ``` - -### Deleting a document via HTTP REST API - -Deleting a document via HTTP is as following: +or, you can use the RESTful API as follows: ```bash -$ curl -X DELETE 'http://127.0.0.1:5002/documents/enwiki_1' +$ curl -X GET 'http://127.0.0.1:8000/v1/cluster' | jq . ``` +You can see the result in JSON format. 
The result of the above command is: -### Indexing documents in bulk via HTTP REST API - -Indexing documents in bulk via HTTP is as following: - -```bash -$ curl -X PUT 'http://127.0.0.1:5002/documents' -d @./example/wiki_bulk_index.json +```json +{ + "cluster": { + "nodes": { + "node1": { + "raft_address": ":7000", + "metadata": { + "grpc_address": ":9000", + "http_address": ":8000" + }, + "state": "Leader" + }, + "node2": { + "raft_address": ":7001", + "metadata": { + "grpc_address": ":9001", + "http_address": ":8001" + }, + "state": "Follower" + }, + "node3": { + "raft_address": ":7002", + "metadata": { + "grpc_address": ":9002", + "http_address": ":8002" + }, + "state": "Follower" + } + }, + "leader": "node1" + } +} ``` +Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, data loss is inevitable, so avoid deploying single nodes. -### Deleting documents in bulk via HTTP REST API - -Deleting documents in bulk via HTTP is as following: +The above example, the node joins to the cluster at startup, but you can also join the node that already started on standalone mode to the cluster later, as follows: ```bash -$ curl -X DELETE 'http://127.0.0.1:5002/documents' -d @./example/wiki_bulk_delete.json +$ ./bin/blast join --grpc-address=:9000 node2 127.0.0.1:9001 ``` - -## Starting Blast in cluster mode - -![cluster](https://user-images.githubusercontent.com/970948/59768677-bf846d00-92df-11e9-8a70-92496ff55ce7.png) - -Blast can easily bring up a cluster. Running a Blast in standalone is not fault tolerant. If you need to improve fault tolerance, start two more indexers as follows: - -First of all, start a indexer in standalone. 
+or, you can use the RESTful API as follows: ```bash -$ ./bin/blastd \ - indexer \ - --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ curl -X PUT 'http://127.0.0.1:8000/v1/cluster/node2' --data-binary ' +{ + "raft_address": ":7001", + "metadata": { + "grpc_address": ":9001", + "http_address": ":8001" + } +} +' ``` -Then, start two more indexers. +To remove a node from the cluster, execute the following command: ```bash -$ ./bin/blastd \ - indexer \ - --peer-addr=:5001 \ - --node-id=indexer2 \ - --bind-addr=:5010 \ - --grpc-addr=:5011 \ - --http-addr=:5012 \ - --data-dir=/tmp/blast/indexer2 - -$ ./bin/blastd \ - indexer \ - --peer-addr=:5001 \ - --node-id=indexer3 \ - --bind-addr=:5020 \ - --grpc-addr=:5021 \ - --http-addr=:5022 \ - --data-dir=/tmp/blast/indexer3 +$ ./bin/blast leave --grpc-address=:9000 node2 ``` -_Above example shows each Blast node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ - -This instructs each new node to join an existing node, specifying `--peer-addr=:5001`. Each node recognizes the joining clusters when started. -So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the peers in the cluster with the following command: - +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast get cluster --grpc-addr=:5001 +$ curl -X DELETE 'http://127.0.0.1:8000/v1/cluster/node2' ``` -You can see the result in JSON format. 
The result of the above command is: +The following command indexes documents to any node in the cluster: -```json +```bash +$ ./bin/blast set 1 ' { - "indexer1": { - "node_config": { - "bind_addr": ":5000", - "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5001", - "http_addr": ":5002", - "node_id": "indexer1", - "raft_storage_type": "boltdb" - }, - "state": "Leader" - }, - "indexer2": { - "node_config": { - "bind_addr": ":5010", - "data_dir": "/tmp/blast/indexer2", - "grpc_addr": ":5011", - "http_addr": ":5012", - "node_id": "indexer2", - "raft_storage_type": "boltdb" - }, - "state": "Follower" - }, - "indexer3": { - "node_config": { - "bind_addr": ":5020", - "data_dir": "/tmp/blast/indexer3", - "grpc_addr": ":5021", - "http_addr": ":5022", - "node_id": "indexer3", - "raft_storage_type": "boltdb" - }, - "state": "Follower" + "fields": { + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "example" } } -``` - -Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, data loss is inevitable, so avoid deploying single nodes. - -The following command indexes documents to any node in the cluster: - -```bash -$ cat ./example/wiki_doc_enwiki_1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 enwiki_1 +' --grpc-address=:9000 | jq . 
``` So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast get document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast get 1 --grpc-address=:9000 | jq . ``` -You can see the result in JSON format. The result of the above command is: +You can see the result. The result of the above command is: -```json -{ - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" -} +```text +value1 ``` You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast get document --grpc-addr=:5011 enwiki_1 -$ ./bin/blast get document --grpc-addr=:5021 enwiki_1 +$ ./bin/blast get 1 --grpc-address=:9001 | jq . +$ ./bin/blast get 1 --grpc-address=:9002 | jq . ``` -You can see the result in JSON format. The result of the above command is: +You can see the result. The result of the above command is: ```json { - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } } ``` -## Starting Blast in federated mode (experimental) - -![federation](https://user-images.githubusercontent.com/970948/59768498-6f0d0f80-92df-11e9-8538-2a1c6e44c30a.png) - -Running a Blast in cluster mode allows you to replicate the index among indexers in a cluster to improve fault tolerance. -However, as the index grows, performance degradation can become an issue. Therefore, instead of providing a large single physical index, it is better to distribute indices across multiple indexers. -Blast provides a federated mode to enable distributed search and indexing. - -Blast provides the following type of node for federation: -- manager: Manager manage common index mappings to index across multiple indexers. It also manages information and status of clusters that participate in the federation. -- dispatcher: Dispatcher is responsible for distributed search or indexing of each indexer. In the case of a index request, send document to each cluster based on the document ID. And in the case of a search request, the same query is sent to each cluster, then the search results are merged and returned to the client. 
- -### Bring up the manager cluster. - -Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. - -```bash -$ ./bin/blastd \ - manager \ - --node-id=manager1 \ - --bind-addr=:15000 \ - --grpc-addr=:15001 \ - --http-addr=:15002 \ - --data-dir=/tmp/blast/manager1 \ - --raft-storage-type=badger \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb - -$ ./bin/blastd \ - manager \ - --peer-addr=:15001 \ - --node-id=manager2 \ - --bind-addr=:15010 \ - --grpc-addr=:15011 \ - --http-addr=:15012 \ - --data-dir=/tmp/blast/manager2 \ - --raft-storage-type=badger - -$ ./bin/blastd \ - manager \ - --peer-addr=:15001 \ - --node-id=manager3 \ - --bind-addr=:15020 \ - --grpc-addr=:15021 \ - --http-addr=:15022 \ - --data-dir=/tmp/blast/manager3 -``` - -### Bring up the indexer cluster. - -Federated mode differs from cluster mode that it specifies the manager in start up to bring up indexer cluster. -The following example starts two 3-node clusters. 
- -```bash -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ - --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ - --node-id=indexer2 \ - --bind-addr=:5010 \ - --grpc-addr=:5011 \ - --http-addr=:5012 \ - --data-dir=/tmp/blast/indexer2 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ - --node-id=indexer3 \ - --bind-addr=:5020 \ - --grpc-addr=:5021 \ - --http-addr=:5022 \ - --data-dir=/tmp/blast/indexer3 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ - --node-id=indexer4 \ - --bind-addr=:5030 \ - --grpc-addr=:5031 \ - --http-addr=:5032 \ - --data-dir=/tmp/blast/indexer4 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ - --node-id=indexer5 \ - --bind-addr=:5040 \ - --grpc-addr=:5041 \ - --http-addr=:5042 \ - --data-dir=/tmp/blast/indexer5 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ - --node-id=indexer6 \ - --bind-addr=:5050 \ - --grpc-addr=:5051 \ - --http-addr=:5052 \ - --data-dir=/tmp/blast/indexer6 -``` - -### Start up the dispatcher. - -Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. 
- -```bash -$ ./bin/blastd \ - dispatcher \ - --manager-addr=:15001 \ - --grpc-addr=:25001 \ - --http-addr=:25002 -``` - -```bash -$ cat ./example/wiki_bulk_index.json | xargs -0 ./bin/blast set document --grpc-addr=:25001 -``` - -```bash -$ cat ./example/wiki_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:25001 -``` - -```bash -$ cat ./example/wiki_bulk_delete.json | xargs -0 ./bin/blast delete document --grpc-addr=:25001 -``` - - +## Docker -## Blast on Docker - -### Building Docker container image on localhost +### Build Docker container image You can build the Docker container image like so: @@ -860,7 +654,7 @@ You can build the Docker container image like so: $ make docker-build ``` -### Pulling Docker container image from docker.io +### Pull Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: @@ -870,116 +664,101 @@ $ docker pull mosuka/blast:latest See https://hub.docker.com/r/mosuka/blast/tags/ +### Start on Docker -### Pulling Docker container image from docker.io - -You can also use the Docker container image already registered in docker.io like so: +Running a Blast data node on Docker. Start Blast node like so: ```bash -$ docker pull mosuka/blast:latest -``` - - -### Running Indexer on Docker - -Running a Blast data node on Docker. 
Start Blast data node like so: - -```bash -$ docker run --rm --name blast-indexer1 \ - -p 5000:5000 \ - -p 5001:5001 \ - -p 5002:5002 \ - -v $(pwd)/example:/opt/blast/example \ - mosuka/blast:latest blastd indexer \ - --node-id=blast-indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 \ - --index-mapping-file=/opt/blast/example/wiki_index_mapping.json \ - --index-storage-type=leveldb +$ docker run --rm --name blast-node1 \ + -p 7000:7000 \ + -p 8000:8000 \ + -p 9000:9000 \ + -v $(pwd)/etc/blast_mapping.json:/etc/blast_mapping.json \ + mosuka/blast:latest start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + --grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=/etc/blast_mapping.json ``` You can execute the command in docker container as follows: ```bash -$ docker exec -it blast-indexer1 blast-indexer node --grpc-addr=:7070 +$ docker exec -it blast-node1 blast node --grpc-address=:9000 ``` +## Securing Blast -## Wikipedia example - -This section explain how to index Wikipedia dump to Blast. +Blast supports HTTPS access, ensuring that all communication between clients and a cluster is encrypted. +### Generating a certificate and private key -### Install wikiextractor +One way to generate the necessary resources is via [openssl](https://www.openssl.org/). For example: ```bash -$ cd ${HOME} -$ git clone git@github.com:attardi/wikiextractor.git +$ openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' +Generating a 4096 bit RSA private key +............................++ +........++ +writing new private key to 'key.pem' ``` +### Secure cluster example -### Download wikipedia dump +Starting a node with HTTPS enabled, node-to-node encryption, and with the above configuration file. It is assumed the HTTPS X.509 certificate and key are at the paths server.crt and key.pem respectively. 
```bash -$ curl -o ~/tmp/enwiki-20190101-pages-articles.xml.bz2 https://dumps.wikimedia.org/enwiki/20190101/enwiki-20190101-pages-articles.xml.bz2 +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + --grpc-address=:9000 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` - -### Parsing wikipedia dump - ```bash -$ cd wikiextractor -$ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles.xml.bz2 -``` - - -### Indexing wikipedia dump - -```shell -$ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) - do - echo "Indexing ${FILE}" - TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") - DOCS=$(cat ${FILE} | jq -r '. + {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -s) - curl -s -X PUT -H 'Content-Type: application/json' "http://127.0.0.1:5002/documents" -d "${DOCS}" - done +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` - -## Spatial/Geospatial search example - -This section explain how to index Spatial/Geospatial data to Blast. 
- -### Starting Indexer with Spatial/Geospatial index mapping - ```bash -$ ./bin/blastd \ - indexer \ - --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 \ - --index-mapping-file=./example/geo_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` -### Indexing example Spatial/Geospatial data +You can access the cluster by adding a flag, such as the following command: ```bash -$ cat ./example/geo_doc1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc1 -$ cat ./example/geo_doc2.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc2 -$ cat ./example/geo_doc3.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc3 -$ cat ./example/geo_doc4.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc4 -$ cat ./example/geo_doc5.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc5 -$ cat ./example/geo_doc6.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc6 +$ ./bin/blast cluster --grpc-address=:9000 --certificate-file=./etc/blast_cert.pem --common-name=localhost | jq . ``` -### Searching example Spatial/Geospatial data +or ```bash -$ cat ./example/geo_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:5001 +$ curl -X GET https://localhost:8000/v1/cluster --cacert ./etc/cert.pem | jq . 
``` diff --git a/builtin/config_bleve.go b/builtin/config_bleve.go new file mode 100644 index 0000000..e69a7b9 --- /dev/null +++ b/builtin/config_bleve.go @@ -0,0 +1,5 @@ +package builtin + +import ( + _ "github.com/blevesearch/bleve/v2/config" +) diff --git a/builtins/config_badger.go b/builtins/config_badger.go deleted file mode 100644 index b920c65..0000000 --- a/builtins/config_badger.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build badger full - -package builtins - -import ( - _ "github.com/mosuka/bbadger" -) diff --git a/builtins/config_bleve.go b/builtins/config_bleve.go deleted file mode 100644 index 031bf9a..0000000 --- a/builtins/config_bleve.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package builtins - -import ( - _ "github.com/blevesearch/bleve/config" -) diff --git a/client/grpc_client.go b/client/grpc_client.go new file mode 100644 index 0000000..c00fd97 --- /dev/null +++ b/client/grpc_client.go @@ -0,0 +1,218 @@ +package client + +import ( + "context" + "log" + "math" + "time" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +type GRPCClient struct { + ctx context.Context + cancel context.CancelFunc + conn *grpc.ClientConn + client protobuf.IndexClient + + logger *log.Logger +} + +func NewGRPCClient(grpc_address string) (*GRPCClient, error) { + return NewGRPCClientWithContext(grpc_address, context.Background()) +} + +func NewGRPCClientWithContext(grpc_address string, baseCtx context.Context) (*GRPCClient, error) { + return NewGRPCClientWithContextTLS(grpc_address, baseCtx, "", "") +} + +func NewGRPCClientWithContextTLS(grpcAddress string, baseCtx context.Context, certificateFile string, commonName string) (*GRPCClient, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + ctx, cancel := context.WithCancel(baseCtx) + + if certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + conn, err := grpc.DialContext(ctx, grpcAddress, dialOpts...) 
+ if err != nil { + cancel() + return nil, err + } + + return &GRPCClient{ + ctx: ctx, + cancel: cancel, + conn: conn, + client: protobuf.NewIndexClient(conn), + }, nil +} + +func (c *GRPCClient) Close() error { + c.cancel() + if c.conn != nil { + return c.conn.Close() + } + + return c.ctx.Err() +} + +func (c *GRPCClient) Target() string { + return c.conn.Target() +} + +func (c *GRPCClient) LivenessCheck(opts ...grpc.CallOption) (*protobuf.LivenessCheckResponse, error) { + if resp, err := c.client.LivenessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) ReadinessCheck(opts ...grpc.CallOption) (*protobuf.ReadinessCheckResponse, error) { + if resp, err := c.client.ReadinessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Join(req *protobuf.JoinRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Join(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Leave(req *protobuf.LeaveRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Leave(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Node(opts ...grpc.CallOption) (*protobuf.NodeResponse, error) { + if resp, err := c.client.Node(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Cluster(opts ...grpc.CallOption) (*protobuf.ClusterResponse, error) { + if resp, err := c.client.Cluster(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { + if _, err := c.client.Snapshot(c.ctx, &empty.Empty{}); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Get(req *protobuf.GetRequest, opts ...grpc.CallOption) (*protobuf.GetResponse, error) { + if resp, err := 
c.client.Get(c.ctx, req, opts...); err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + return nil, errors.ErrNotFound + default: + return nil, err + } + } else { + return resp, nil + } +} + +func (c *GRPCClient) Search(req *protobuf.SearchRequest, opts ...grpc.CallOption) (*protobuf.SearchResponse, error) { + if resp, err := c.client.Search(c.ctx, req, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Set(req *protobuf.SetRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Set(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Delete(req *protobuf.DeleteRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Delete(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) BulkIndex(req *protobuf.BulkIndexRequest, opts ...grpc.CallOption) (*protobuf.BulkIndexResponse, error) { + if resp, err := c.client.BulkIndex(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) BulkDelete(req *protobuf.BulkDeleteRequest, opts ...grpc.CallOption) (*protobuf.BulkDeleteResponse, error) { + if resp, err := c.client.BulkDelete(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) Mapping(opts ...grpc.CallOption) (*protobuf.MappingResponse, error) { + if resp, err := c.client.Mapping(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Watch(req *empty.Empty, opts ...grpc.CallOption) (protobuf.Index_WatchClient, error) { + return c.client.Watch(c.ctx, req, opts...) 
+} + +func (c *GRPCClient) Metrics(opts ...grpc.CallOption) (*protobuf.MetricsResponse, error) { + if resp, err := c.client.Metrics(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} diff --git a/cmd/blast/delete_document.go b/cmd/blast/delete_document.go deleted file mode 100644 index 558e959..0000000 --- a/cmd/blast/delete_document.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execDeleteDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - // create documents - ids := make([]string, 0) - - // documents - idsStr := c.Args().Get(0) - - err := json.Unmarshal([]byte(idsStr), &ids) - if err != nil { - switch err.(type) { - case *json.SyntaxError: - ids = append(ids, idsStr) - default: - return err - } - } - - // create client - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - result, err := client.DeleteDocument(ids) - if err != nil { - return err - } - - resultBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - - return nil -} diff --git a/cmd/blast/delete_node.go b/cmd/blast/delete_node.go deleted file mode 100644 index d2d7566..0000000 --- a/cmd/blast/delete_node.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execDeleteNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - nodeId := c.Args().Get(0) - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = client.DeleteNode(nodeId) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blast/delete_value.go b/cmd/blast/delete_value.go deleted file mode 100644 index 93413e4..0000000 --- a/cmd/blast/delete_value.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execDeleteValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = client.DeleteValue(key) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blast/get_cluster.go b/cmd/blast/get_cluster.go deleted file mode 100644 index 1c123af..0000000 --- a/cmd/blast/get_cluster.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execGetCluster(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - cluster, err := client.GetCluster() - if err != nil { - return err - } - - clusterBytes, err := json.MarshalIndent(cluster, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) - - return nil -} diff --git a/cmd/blast/get_document.go b/cmd/blast/get_document.go deleted file mode 100644 index 98b31de..0000000 --- a/cmd/blast/get_document.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execGetDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - id := c.Args().Get(0) - if id == "" { - err := errors.New("arguments are not correct") - return err - } - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - fields, err := client.GetDocument(id) - if err != nil { - return err - } - - fieldsBytes, err := json.MarshalIndent(fields, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(fieldsBytes))) - - return nil -} diff --git a/cmd/blast/get_node.go b/cmd/blast/get_node.go deleted file mode 100644 index c7c8271..0000000 --- a/cmd/blast/get_node.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execGetNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - nodeId := c.Args().Get(0) - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - metadata, err := client.GetNode(nodeId) - if err != nil { - return err - } - - metadataBytes, err := json.MarshalIndent(metadata, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) - - return nil -} diff --git a/cmd/blast/get_value.go b/cmd/blast/get_value.go deleted file mode 100644 index d4ec2ed..0000000 --- a/cmd/blast/get_value.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execGetValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - key := c.Args().Get(0) - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - value, err := client.GetValue(key) - if err != nil { - return err - } - - valueBytes, err := json.MarshalIndent(value, "", " ") - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(valueBytes))) - - return nil -} diff --git a/cmd/blast/livenessprobe.go b/cmd/blast/livenessprobe.go deleted file mode 100644 index b801fd8..0000000 --- a/cmd/blast/livenessprobe.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execLivenessProbe(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - state, err := client.LivenessProbe() - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) - - return nil -} diff --git a/cmd/blast/main.go b/cmd/blast/main.go deleted file mode 100644 index 32e9e87..0000000 --- a/cmd/blast/main.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - "path" - - "github.com/mosuka/blast/version" - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Usage = "blast" - app.Version = version.Version - app.Authors = []cli.Author{ - { - Name: "mosuka", - Email: "minoru.osuka@gmail.com", - }, - } - app.Commands = []cli.Command{ - { - Name: "livenessprobe", - Usage: "liveness probe", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execLivenessProbe, - }, - { - Name: "readinessprobe", - Usage: "readiness probe", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execReadinessProbe, - }, - { - Name: "get", - Usage: "get", - Subcommands: []cli.Command{ - { - Name: "node", - Usage: "get node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[id]", - Action: execGetNode, - }, - { - Name: "cluster", - Usage: "get cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execGetCluster, - }, - { - Name: "value", - Usage: "get value", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[key]", - Action: execGetValue, - }, - { - Name: "document", - Usage: "get document", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[id]", - Action: execGetDocument, - }, - }, - }, - { - Name: "set", - Usage: "set", - Subcommands: []cli.Command{ - { - Name: "node", - Usage: "set node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[id] [metadata]", - Action: 
execSetNode, - }, - { - Name: "value", - Usage: "set value", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[key] [value]", - Action: execSetValue, - }, - { - Name: "document", - Usage: "set document", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[documents | [id] [fields]]", - Action: execSetDocument, - }, - }, - }, - { - Name: "delete", - Usage: "delete", - Subcommands: []cli.Command{ - { - Name: "node", - Usage: "delete node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[id]", - Action: execDeleteNode, - }, - { - Name: "value", - Usage: "delete value", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[key] [value]", - Action: execDeleteValue, - }, - { - Name: "document", - Usage: "delete document", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[id] ...", - Action: execDeleteDocument, - }, - }, - }, - { - Name: "watch", - Usage: "watch", - Subcommands: []cli.Command{ - { - Name: "cluster", - Usage: "watch cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execWatchCluster, - }, - { - Name: "store", - Usage: "watch store", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[key]", - Action: execWatchStore, - }, - }, - }, - { - Name: "snapshot", - Usage: "snapshot data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execSnapshot, - }, - { - Name: "search", - Usage: 
"search documents", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - ArgsUsage: "[search request]", - Action: execSearch, - }, - } - - cli.HelpFlag = cli.BoolFlag{ - Name: "help, h", - Usage: "Show this message", - } - cli.VersionFlag = cli.BoolFlag{ - Name: "version, v", - Usage: "Print the version", - } - - err := app.Run(os.Args) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } -} diff --git a/cmd/blast/readinessprobe.go b/cmd/blast/readinessprobe.go deleted file mode 100644 index 36bf970..0000000 --- a/cmd/blast/readinessprobe.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execReadinessProbe(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - state, err := client.ReadinessProbe() - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) - - return nil -} diff --git a/cmd/blast/search.go b/cmd/blast/search.go deleted file mode 100644 index 174aa83..0000000 --- a/cmd/blast/search.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - searchRequestStr := c.Args().Get(0) - if searchRequestStr == "" { - err := errors.New("key argument must be set") - return err - } - - // string -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if searchRequestStr != "" { - err := json.Unmarshal([]byte(searchRequestStr), searchRequest) - if err != nil { - return err - } - } - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - searchResult, err := client.Search(searchRequest) - if err != nil { - return err - } - - jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) - - return nil -} diff --git a/cmd/blast/set_document.go b/cmd/blast/set_document.go deleted file mode 100644 index fded562..0000000 --- a/cmd/blast/set_document.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSetDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - // create documents - docs := make([]map[string]interface{}, 0) - - if c.NArg() == 1 { - // documents - docsStr := c.Args().Get(0) - - err := json.Unmarshal([]byte(docsStr), &docs) - if err != nil { - return err - } - } else if c.NArg() == 2 { - // document - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - - // string -> map[string]interface{} - var fields map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fields) - if err != nil { - return err - } - - // create document - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - - docs = append(docs, doc) - } else { - return errors.New("argument error") - } - - // create gRPC client - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - // index documents in bulk - count, err := client.IndexDocument(docs) - if err != nil { - return err - } - - resultBytes, err := json.MarshalIndent(count, "", " ") - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - - return nil -} diff --git a/cmd/blast/set_node.go b/cmd/blast/set_node.go deleted file mode 100644 index a32052a..0000000 --- a/cmd/blast/set_node.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSetNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - nodeId := c.Args().Get(0) - - metadataStr := c.Args().Get(1) - - var metadata map[string]interface{} - err := json.Unmarshal([]byte(metadataStr), &metadata) - if err != nil { - return err - } - - peerClient, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := peerClient.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = peerClient.SetNode(nodeId, metadata) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blast/set_value.go b/cmd/blast/set_value.go deleted file mode 100644 index 04c3032..0000000 --- a/cmd/blast/set_value.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSetValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - valueStr := c.Args().Get(1) - if valueStr == "" { - err := errors.New("value argument must be set") - return err - } - - var value interface{} - err := json.Unmarshal([]byte(valueStr), &value) - if err != nil { - switch err.(type) { - case *json.SyntaxError: - value = valueStr - default: - return err - } - } - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = client.SetValue(key, value) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blast/snapshot.go b/cmd/blast/snapshot.go deleted file mode 100644 index 7e3ec46..0000000 --- a/cmd/blast/snapshot.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = client.Snapshot() - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blast/watch_cluster.go b/cmd/blast/watch_cluster.go deleted file mode 100644 index cb2e267..0000000 --- a/cmd/blast/watch_cluster.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "log" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/protobuf" - "github.com/urfave/cli" -) - -func execWatchCluster(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = execGetCluster(c) - if err != nil { - return err - } - - watchClient, err := client.WatchCluster() - if err != nil { - return err - } - - for { - resp, err := watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - cluster, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - return err - } - if cluster == nil { - return errors.New("nil") - } - - var clusterBytes []byte - clusterMap := *cluster.(*map[string]interface{}) - clusterBytes, err = json.MarshalIndent(clusterMap, "", " ") - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) - } - - return nil -} diff --git a/cmd/blast/watch_store.go b/cmd/blast/watch_store.go deleted file mode 100644 index e10b1c5..0000000 --- a/cmd/blast/watch_store.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "log" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/protobuf" - "github.com/urfave/cli" -) - -func execWatchStore(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - key := c.Args().Get(0) - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - watchClient, err := client.WatchStore(key) - if err != nil { - return err - } - - for { - resp, err := watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - value, err := protobuf.MarshalAny(resp.Value) - if err != nil { - return err - } - if value == nil { - return errors.New("nil") - } - - var valueBytes []byte - switch value.(type) { - case *map[string]interface{}: - valueMap := *value.(*map[string]interface{}) - valueBytes, err = json.MarshalIndent(valueMap, "", " ") - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, string(valueBytes))) - case *string: - valueStr := *value.(*string) - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %s", resp.Command.String(), resp.Key, valueStr)) - default: - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, &value)) - } - } - - return nil -} diff --git a/cmd/blastd/dispatcher.go b/cmd/blastd/dispatcher.go deleted file mode 100644 index 95ad75b..0000000 --- a/cmd/blastd/dispatcher.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/logutils" - "github.com/urfave/cli" -) - -func startDispatcher(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") - - managerAddr := c.String("manager-addr") - - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - 
grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, - ) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if managerAddr != "" { - clusterConfig.ManagerAddr = managerAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - } - - svr, err := dispatcher.NewServer(clusterConfig, nodeConfig, logger, grpcLogger, httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blastd/indexer.go b/cmd/blastd/indexer.go deleted file mode 100644 index 04bf1af..0000000 --- a/cmd/blastd/indexer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/urfave/cli" -) - -func startIndexer(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") - - managerAddr := c.String("manager-addr") - clusterId := c.String("cluster-id") - - nodeId := c.String("node-id") - bindAddr := c.String("bind-addr") - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - peerAddr := c.String("peer-addr") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - 
grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, - ) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if managerAddr != "" { - clusterConfig.ManagerAddr = managerAddr - } - if clusterId != "" { - clusterConfig.ClusterId = clusterId - } - if peerAddr != "" { - clusterConfig.PeerAddr = peerAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - NodeId: nodeId, - BindAddr: bindAddr, - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - DataDir: dataDir, - RaftStorageType: raftStorageType, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - // create index config - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - svr, err := indexer.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blastd/main.go b/cmd/blastd/main.go deleted file mode 100644 index e69e527..0000000 --- a/cmd/blastd/main.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - "path" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/version" - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Usage = "blastd" - app.Version = version.Version - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "INFO", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-access-log-file", - Value: os.Stderr.Name(), - Usage: "HTTP access log file", - }, - 
cli.IntFlag{ - Name: "http-access-log-max-size", - Value: 500, - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-access-log-max-backups", - Value: 3, - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-access-log-max-age", - Value: 30, - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-access-log-compress", - Usage: "Compress a HTTP access log", - }, - } - app.Authors = []cli.Author{ - { - Name: "mosuka", - Email: "minoru.osuka@gmail.com", - }, - } - app.Commands = []cli.Command{ - { - Name: "indexer", - Usage: "Start indexer", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-addr", - Value: "", - Usage: "The gRPC address of the manager node that exists in the federation to be joined", - }, - cli.StringFlag{ - Name: "cluster-id", - Value: "default", - Usage: "Cluster ID", - }, - cli.StringFlag{ - Name: "node-id", - Value: "indexer1", - Usage: "Node ID", - }, - cli.StringFlag{ - Name: "bind-addr", - Value: ":5000", - Usage: "Raft bind address", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "gRPC Server listen address", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":5002", - Usage: "HTTP server listen address", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast-index", - Usage: "Data directory", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - Usage: "Raft log storage type to use", - }, - cli.StringFlag{ - Name: "peer-addr", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - Usage: "Path to a file containing a JSON representation of an index mapping to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - Usage: "Index storage type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: 
bleve.Config.DefaultKVStore, - Usage: "Index storage type to use", - }, - }, - Action: startIndexer, - }, - { - Name: "manager", - Usage: "Start manager", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "Node ID", - }, - cli.StringFlag{ - Name: "bind-addr", - Value: ":15000", - Usage: "The address that should be used to for internal cluster communications", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: ":15001", - Usage: "The address that should be used to for client communications over gRPC", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":15002", - Usage: "The address that should be used to for client communications over HTTP", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "./", - Usage: "Data directory", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - Usage: "Raft log storage type to use", - }, - cli.StringFlag{ - Name: "peer-addr", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - Usage: "Path to a file containing a JSON representation of an index mapping to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - Usage: "Index storage type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - Usage: "Index storage type to use", - }, - }, - Action: startManager, - }, - { - Name: "dispatcher", - Usage: "Start dispatcher", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-addr", - Value: ":15001", - Usage: "Manager address", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: ":25001", - Usage: "gRPC Server listen address", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":25002", - Usage: "HTTP server listen address", - }, - }, - Action: startDispatcher, - }, - } - - cli.HelpFlag = cli.BoolFlag{ - Name: "help, h", - Usage: "Show this message", - } - cli.VersionFlag = 
cli.BoolFlag{ - Name: "version, v", - Usage: "Print the version", - } - - err := app.Run(os.Args) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } -} diff --git a/cmd/blastd/manager.go b/cmd/blastd/manager.go deleted file mode 100644 index da15dd4..0000000 --- a/cmd/blastd/manager.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func startManager(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - 
httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") - - nodeId := c.String("node-id") - bindAddr := c.String("bind-addr") - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - peerAddr := c.String("peer-addr") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, - ) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if peerAddr != "" { - clusterConfig.PeerAddr = peerAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - NodeId: nodeId, - BindAddr: bindAddr, - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - DataDir: dataDir, - RaftStorageType: raftStorageType, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - // create index config - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - svr, err := manager.NewServer(clusterConfig, 
nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/bulk_delete.go b/cmd/bulk_delete.go new file mode 100644 index 0000000..603eeb9 --- /dev/null +++ b/cmd/bulk_delete.go @@ -0,0 +1,129 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "strings" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkDeleteCmd = &cobra.Command{ + Use: "bulk-delete", + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + req := &protobuf.BulkDeleteRequest{ + Requests: make([]*protobuf.DeleteRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } 
+ defer func() { + _ = c.Close() + }() + + count, err := c.BulkDelete(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkDeleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkDeleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + bulkDeleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkDeleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkDeleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkDeleteCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkDeleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkDeleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkDeleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/bulk_index.go b/cmd/bulk_index.go new file mode 100644 index 0000000..56293b0 --- /dev/null +++ b/cmd/bulk_index.go @@ -0,0 +1,135 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + + homedir 
"github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkIndexCmd = &cobra.Command{ + Use: "bulk-index", + Short: "Index documents in bulk", + Long: "Index documents in bulk", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + m := marshaler.BlastMarshaler{} + + req := &protobuf.BulkIndexRequest{ + Requests: make([]*protobuf.SetRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + count, err := c.BulkIndex(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkIndexCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + 
viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkIndexCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + bulkIndexCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkIndexCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkIndexCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkIndexCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkIndexCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkIndexCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkIndexCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/cluster.go b/cmd/cluster.go new file mode 100644 index 0000000..ef78f42 --- /dev/null +++ b/cmd/cluster.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + clusterCmd = &cobra.Command{ + Use: "cluster", + Short: "Get the cluster info", + Long: "Get the cluster info", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, 
context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Cluster() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(clusterCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + clusterCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + clusterCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + clusterCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + clusterCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", clusterCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", clusterCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", clusterCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/delete.go b/cmd/delete.go new file mode 100644 index 0000000..ea21b04 --- /dev/null +++ b/cmd/delete.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + deleteCmd = &cobra.Command{ + Use: "delete ID", + Args: cobra.ExactArgs(1), + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.DeleteRequest{ + Id: id, + } + + if err := c.Delete(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(deleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + 
os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + deleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + deleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + deleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + deleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", deleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", deleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", deleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/get.go b/cmd/get.go new file mode 100644 index 0000000..99a62c0 --- /dev/null +++ b/cmd/get.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + getCmd = &cobra.Command{ + Use: "get ID", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, 
context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.GetRequest{ + Id: id, + } + + resp, err := c.Get(req) + if err != nil { + return err + } + + m := marshaler.BlastMarshaler{} + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(getCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + getCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + getCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + getCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + getCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", getCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", getCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", getCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/healthcheck.go b/cmd/healthcheck.go new file mode 100644 index 0000000..ffe28a6 --- /dev/null +++ b/cmd/healthcheck.go @@ -0,0 +1,100 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + healthCheckCmd = &cobra.Command{ + Use: "healthcheck", + Short: "Health check a node", + Long: "Health check a node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + lResp, err := c.LivenessCheck() + if err != nil { + return err + } + + rResp, err := c.ReadinessCheck() + if err != nil { + return err + } + + resp := map[string]bool{ + "liveness": lResp.Alive, + "readiness:": rResp.Ready, + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(healthCheckCmd) + + 
cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + healthCheckCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + healthCheckCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + healthCheckCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + healthCheckCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", healthCheckCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", healthCheckCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", healthCheckCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/join.go b/cmd/join.go new file mode 100644 index 0000000..81bd84d --- /dev/null +++ b/cmd/join.go @@ -0,0 +1,104 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + joinCmd = &cobra.Command{ + Use: "join ID GRPC_ADDRESS", + Args: cobra.ExactArgs(2), + Short: "Join a node to the cluster", + Long: "Join a node to the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = 
viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + targetGrpcAddress := args[1] + + t, err := client.NewGRPCClientWithContextTLS(targetGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = t.Close() + }() + + nodeResp, err := t.Node() + if err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.JoinRequest{ + Id: id, + Node: nodeResp.Node, + } + + if err := c.Join(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(joinCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + joinCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + joinCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + joinCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + joinCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", joinCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", joinCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", joinCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/leave.go b/cmd/leave.go new file mode 100644 index 0000000..42d8ffa --- /dev/null +++ b/cmd/leave.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + leaveCmd = &cobra.Command{ + Use: "leave ID", + Args: cobra.ExactArgs(1), + Short: "Leave a node from the cluster", + Long: "Leave a node from the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.LeaveRequest{ + Id: id, + } + + if err := c.Leave(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(leaveCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + 
os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in config search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + leaveCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + leaveCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + leaveCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + leaveCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", leaveCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", leaveCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", leaveCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/mapping.go b/cmd/mapping.go new file mode 100644 index 0000000..bbf116d --- /dev/null +++ b/cmd/mapping.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + mappingCmd = &cobra.Command{ + Use: "mapping", + Short: "Get the index mapping", + Long: "Get the index mapping", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() 
{ + _ = c.Close() + }() + + resp, err := c.Mapping() + if err != nil { + return err + } + + fmt.Println(string(resp.Mapping)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(mappingCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + mappingCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + mappingCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + mappingCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + mappingCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", mappingCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", mappingCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", mappingCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/metrics.go b/cmd/metrics.go new file mode 100644 index 0000000..425d564 --- /dev/null +++ b/cmd/metrics.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + metricsCmd = &cobra.Command{ + Use: "metrics", + Short: "Get the node metrics", + Long: 
"Get the node metrics in Prometheus exposition format", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Metrics() + if err != nil { + return err + } + + fmt.Println(string(resp.Metrics)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(metricsCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + metricsCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + metricsCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + metricsCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + metricsCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", metricsCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", metricsCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", metricsCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000..572c512 --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + nodeCmd = &cobra.Command{ + Use: "node", + Short: "Get the node info", + Long: "Get the node info", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Node() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(nodeCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + 
viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + nodeCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + nodeCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + nodeCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + nodeCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", nodeCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", nodeCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", nodeCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..f2c7120 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +var ( + rootCmd = &cobra.Command{ + Use: "blast", + Short: "The lightweight distributed search server", + Long: "The lightweight distributed search server", + } +) + +func Execute() error { + return rootCmd.Execute() +} diff --git a/cmd/search.go b/cmd/search.go new file mode 100644 index 0000000..e62b15b --- /dev/null +++ b/cmd/search.go @@ -0,0 +1,101 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + searchCmd = &cobra.Command{ + 
Use: "search REQUEST", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + searchRequest := args[0] + + m := marshaler.BlastMarshaler{} + + req := &protobuf.SearchRequest{} + if err := m.Unmarshal([]byte(searchRequest), req); err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Search(req) + if err != nil { + return err + } + + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(searchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + searchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + searchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + searchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + searchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", searchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", searchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", searchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/set.go b/cmd/set.go new file mode 100644 index 0000000..b765bd7 --- /dev/null +++ b/cmd/set.go @@ -0,0 +1,94 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + setCmd = &cobra.Command{ + Use: "set ID FIELDS", + Args: cobra.ExactArgs(2), + Short: "Set a document", + Long: "Set a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + fields := args[1] + + req := &protobuf.SetRequest{} + m := marshaler.BlastMarshaler{} + if err := m.Unmarshal([]byte(fields), req); err != nil { + return err + } + req.Id = id + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Set(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(setCmd) + + cobra.OnInitialize(func() { + if configFile != "" 
{ + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + setCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + setCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + setCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + setCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", setCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", setCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", setCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/snapshot.go b/cmd/snapshot.go new file mode 100644 index 0000000..2e76298 --- /dev/null +++ b/cmd/snapshot.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + snapshotCmd = &cobra.Command{ + Use: "snapshot", + Short: "Create a snapshot", + Long: "Create a snapshot which is full-volume copy of data stored on the node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") 
+ + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Snapshot(); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(snapshotCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + snapshotCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + snapshotCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + snapshotCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + snapshotCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", snapshotCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", snapshotCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", snapshotCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/start.go b/cmd/start.go new file mode 100644 index 0000000..a2b0fcd --- /dev/null +++ b/cmd/start.go @@ -0,0 +1,221 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + homedir "github.com/mitchellh/go-homedir" + 
"github.com/mosuka/blast/client" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/server" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + startCmd = &cobra.Command{ + Use: "start", + Short: "Start the index server", + Long: "Start the index server", + RunE: func(cmd *cobra.Command, args []string) error { + id = viper.GetString("id") + raftAddress = viper.GetString("raft_address") + grpcAddress = viper.GetString("grpc_address") + httpAddress = viper.GetString("http_address") + dataDirectory = viper.GetString("data_directory") + peerGrpcAddress = viper.GetString("peer_grpc_address") + + mappingFile = viper.GetString("mapping_file") + + certificateFile = viper.GetString("certificate_file") + keyFile = viper.GetString("key_file") + commonName = viper.GetString("common_name") + + corsAllowedMethods = viper.GetStringSlice("cors_allowed_methods") + corsAllowedOrigins = viper.GetStringSlice("cors_allowed_origins") + corsAllowedHeaders = viper.GetStringSlice("cors_allowed_headers") + + logLevel = viper.GetString("log_level") + logFile = viper.GetString("log_file") + logMaxSize = viper.GetInt("log_max_size") + logMaxBackups = viper.GetInt("log_max_backups") + logMaxAge = viper.GetInt("log_max_age") + logCompress = viper.GetBool("log_compress") + + logger := log.NewLogger( + logLevel, + logFile, + logMaxSize, + logMaxBackups, + logMaxAge, + logCompress, + ) + + bootstrap := peerGrpcAddress == "" || peerGrpcAddress == grpcAddress + + indexMapping := mapping.NewIndexMapping() + if mappingFile != "" { + var err error + if indexMapping, err = mapping.NewIndexMappingFromFile(mappingFile); err != nil { + return err + } + } + + raftServer, err := server.NewRaftServer(id, raftAddress, dataDirectory, indexMapping, bootstrap, logger) + if err != nil { + return err + } + + grpcServer, err := server.NewGRPCServerWithTLS(grpcAddress, raftServer, certificateFile, keyFile, commonName, 
logger) + if err != nil { + return err + } + + grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, corsAllowedMethods, corsAllowedOrigins, corsAllowedHeaders, logger) + if err != nil { + return err + } + + quitCh := make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + if err := raftServer.Start(); err != nil { + return err + } + + if err := grpcServer.Start(); err != nil { + return err + } + + if err := grpcGateway.Start(); err != nil { + return err + } + + // wait for detect leader if it's bootstrap + if bootstrap { + timeout := 60 * time.Second + if err := raftServer.WaitForDetectLeader(timeout); err != nil { + return err + } + } + + // create gRPC client for joining node + var joinGrpcAddress string + if bootstrap { + joinGrpcAddress = grpcAddress + } else { + joinGrpcAddress = peerGrpcAddress + } + + c, err := client.NewGRPCClientWithContextTLS(joinGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + // join this node to the existing cluster + joinRequest := &protobuf.JoinRequest{ + Id: id, + Node: &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, + } + if err = c.Join(joinRequest); err != nil { + return err + } + + // wait for receiving signal + <-quitCh + + _ = grpcGateway.Stop() + _ = grpcServer.Stop() + _ = raftServer.Stop() + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(startCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + 
viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + startCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + startCmd.PersistentFlags().StringVar(&id, "id", "node1", "node ID") + startCmd.PersistentFlags().StringVar(&raftAddress, "raft-address", ":7000", "Raft server listen address") + startCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + startCmd.PersistentFlags().StringVar(&httpAddress, "http-address", ":8000", "HTTP server listen address") + startCmd.PersistentFlags().StringVar(&dataDirectory, "data-directory", "/tmp/blast/data", "data directory which store the index and Raft logs") + startCmd.PersistentFlags().StringVar(&peerGrpcAddress, "peer-grpc-address", "", "listen address of the existing gRPC server in the joining cluster") + startCmd.PersistentFlags().StringVar(&mappingFile, "mapping-file", "", "path to the index mapping file") + startCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + startCmd.PersistentFlags().StringVar(&keyFile, "key-file", "", "path to the client server TLS key file") + startCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedMethods, "cors-allowed-methods", []string{}, "CORS allowed methods (ex: GET,PUT,DELETE,POST)") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedOrigins, "cors-allowed-origins", []string{}, "CORS allowed origins (ex: http://localhost:8080,http://localhost:80)") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedHeaders, "cors-allowed-headers", []string{}, "CORS allowed headers (ex: 
content-type,x-some-key)") + startCmd.PersistentFlags().StringVar(&logLevel, "log-level", "INFO", "log level") + startCmd.PersistentFlags().StringVar(&logFile, "log-file", os.Stderr.Name(), "log file") + startCmd.PersistentFlags().IntVar(&logMaxSize, "log-max-size", 500, "max size of a log file in megabytes") + startCmd.PersistentFlags().IntVar(&logMaxBackups, "log-max-backups", 3, "max backup count of log files") + startCmd.PersistentFlags().IntVar(&logMaxAge, "log-max-age", 30, "max age of a log file in days") + startCmd.PersistentFlags().BoolVar(&logCompress, "log-compress", false, "compress a log file") + + _ = viper.BindPFlag("id", startCmd.PersistentFlags().Lookup("id")) + _ = viper.BindPFlag("raft_address", startCmd.PersistentFlags().Lookup("raft-address")) + _ = viper.BindPFlag("grpc_address", startCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("http_address", startCmd.PersistentFlags().Lookup("http-address")) + _ = viper.BindPFlag("data_directory", startCmd.PersistentFlags().Lookup("data-directory")) + _ = viper.BindPFlag("peer_grpc_address", startCmd.PersistentFlags().Lookup("peer-grpc-address")) + _ = viper.BindPFlag("mapping_file", startCmd.PersistentFlags().Lookup("mapping-file")) + _ = viper.BindPFlag("certificate_file", startCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("key_file", startCmd.PersistentFlags().Lookup("key-file")) + _ = viper.BindPFlag("common_name", startCmd.PersistentFlags().Lookup("common-name")) + _ = viper.BindPFlag("cors_allowed_methods", startCmd.PersistentFlags().Lookup("cors-allowed-methods")) + _ = viper.BindPFlag("cors_allowed_origins", startCmd.PersistentFlags().Lookup("cors-allowed-origins")) + _ = viper.BindPFlag("cors_allowed_headers", startCmd.PersistentFlags().Lookup("cors-allowed-headers")) + _ = viper.BindPFlag("log_level", startCmd.PersistentFlags().Lookup("log-level")) + _ = viper.BindPFlag("log_max_size", startCmd.PersistentFlags().Lookup("log-max-size")) + _ = 
viper.BindPFlag("log_max_backups", startCmd.PersistentFlags().Lookup("log-max-backups")) + _ = viper.BindPFlag("log_max_age", startCmd.PersistentFlags().Lookup("log-max-age")) + _ = viper.BindPFlag("log_compress", startCmd.PersistentFlags().Lookup("log-compress")) +} diff --git a/cmd/variables.go b/cmd/variables.go new file mode 100644 index 0000000..0e0ef9b --- /dev/null +++ b/cmd/variables.go @@ -0,0 +1,25 @@ +package cmd + +var ( + configFile string + id string + raftAddress string + grpcAddress string + httpAddress string + dataDirectory string + peerGrpcAddress string + mappingFile string + certificateFile string + keyFile string + commonName string + corsAllowedMethods []string + corsAllowedOrigins []string + corsAllowedHeaders []string + file string + logLevel string + logFile string + logMaxSize int + logMaxBackups int + logMaxAge int + logCompress bool +) diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 0000000..01d8fa1 --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "fmt" + + "github.com/mosuka/blast/version" + "github.com/spf13/cobra" +) + +var ( + versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number", + Long: "Print the version number", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Printf("version: %s\n", version.Version) + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(versionCmd) +} diff --git a/cmd/watch.go b/cmd/watch.go new file mode 100644 index 0000000..da6be9f --- /dev/null +++ b/cmd/watch.go @@ -0,0 +1,157 @@ +package cmd + +import ( + "context" + "fmt" + "io" + "os" + "os/signal" + "syscall" + + "github.com/golang/protobuf/ptypes/empty" + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + watchCmd = &cobra.Command{ + Use: "watch", + Short: "Watch a node updates", + 
Long: "Watch a node updates", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &empty.Empty{} + watchClient, err := c.Watch(req) + if err != nil { + return err + } + + go func() { + for { + resp, err := watchClient.Recv() + if err == io.EOF { + break + } + if err != nil { + break + } + + switch resp.Event.Type { + case protobuf.Event_Join: + eventReq := &protobuf.SetMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.SetMetadataRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) + case protobuf.Event_Leave: + eventReq := &protobuf.DeleteMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.DeleteMetadataRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) + case protobuf.Event_Set: + putRequest := &protobuf.SetRequest{} + if putRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if putRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", 
resp.Event.Type.String())) + } else { + putRequest = putRequestInstance.(*protobuf.SetRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), putRequest) + case protobuf.Event_Delete: + deleteRequest := &protobuf.DeleteRequest{} + if deleteRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if deleteRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + deleteRequest = deleteRequestInstance.(*protobuf.DeleteRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), deleteRequest) + } + } + }() + + quitCh := make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + <-quitCh + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(watchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + watchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + watchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + watchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + watchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", watchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", watchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", watchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/config/cluster_config.go b/config/cluster_config.go deleted file mode 100644 index fb56a5d..0000000 --- a/config/cluster_config.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -type ClusterConfig struct { - ManagerAddr string `json:"manager_addr,omitempty"` - ClusterId string `json:"cluster_id,omitempty"` - PeerAddr string `json:"peer_addr,omitempty"` -} - -func DefaultClusterConfig() *ClusterConfig { - return &ClusterConfig{} -} diff --git a/config/cluster_config_test.go b/config/cluster_config_test.go deleted file mode 100644 index 24d4c2e..0000000 --- a/config/cluster_config_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "reflect" - "testing" -) - -func TestDefaultClusterConfig(t *testing.T) { - exp := &ClusterConfig{} - act := DefaultClusterConfig() - if !reflect.DeepEqual(exp, act) { - t.Fatalf("expected content to see %v, saw %v", exp, act) - } - - expManagerAddr := "" - actManagerAddr := act.ManagerAddr - if expManagerAddr != actManagerAddr { - t.Fatalf("expected content to see %v, saw %v", expManagerAddr, actManagerAddr) - } - - expClusterId := "" - actClusterId := act.ClusterId - if expClusterId != actClusterId { - t.Fatalf("expected content to see %v, saw %v", expClusterId, actClusterId) - } - - expPeerAddr := "" - actPeerAddr := act.PeerAddr - if expPeerAddr != actPeerAddr { - t.Fatalf("expected content to see %v, saw %v", expPeerAddr, actPeerAddr) - } -} - -func TestClusterConfig_1(t *testing.T) { - expConfig := &ClusterConfig{ - ManagerAddr: ":12000", - ClusterId: "cluster1", - PeerAddr: ":5000", - } - actConfig := DefaultClusterConfig() - actConfig.ManagerAddr = ":12000" - actConfig.ClusterId = "cluster1" - actConfig.PeerAddr = ":5000" - - expManagerAddr := expConfig.ManagerAddr - actManagerAddr := actConfig.ManagerAddr - if expManagerAddr != actManagerAddr { - t.Fatalf("expected content to see %v, saw %v", expManagerAddr, actManagerAddr) - } - - expClusterId := expConfig.ClusterId - actClusterId := actConfig.ClusterId - if expClusterId != actClusterId { - t.Fatalf("expected content to see %v, saw %v", expClusterId, actClusterId) - } - - expPeerAddr := expConfig.PeerAddr - actPeerAddr := actConfig.PeerAddr - if expPeerAddr != actPeerAddr { - t.Fatalf("expected content to see %v, saw %v", expPeerAddr, actPeerAddr) - } -} diff --git a/config/index_config.go b/config/index_config.go deleted file mode 100644 index 89fd4e2..0000000 --- a/config/index_config.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "encoding/json" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" -) - -type IndexConfig struct { - IndexMapping *mapping.IndexMappingImpl `json:"index_mapping,omitempty"` - IndexType string `json:"index_type,omitempty"` - IndexStorageType string `json:"index_storage_type,omitempty"` -} - -func DefaultIndexConfig() *IndexConfig { - return &IndexConfig{ - IndexMapping: mapping.NewIndexMapping(), - IndexType: bleve.Config.DefaultIndexType, - IndexStorageType: bleve.Config.DefaultKVStore, - } -} - -func NewIndexConfigFromMap(src map[string]interface{}) *IndexConfig { - b, err := json.Marshal(src) - if err != nil { - return &IndexConfig{} - } - - var indexConfig *IndexConfig - err = json.Unmarshal(b, &indexConfig) - if err != nil { - return &IndexConfig{} - } - - return indexConfig -} - -func (c *IndexConfig) ToMap() map[string]interface{} { - b, err := json.Marshal(c) - if err != nil { - return map[string]interface{}{} - } - - var m map[string]interface{} - err = json.Unmarshal(b, &m) - if err != nil { - return map[string]interface{}{} - } - - return m -} diff --git a/config/index_config_test.go b/config/index_config_test.go deleted file mode 100644 index 674766c..0000000 --- a/config/index_config_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "reflect" - "testing" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" -) - -func TestDefaultIndexConfig(t *testing.T) { - expConfig := &IndexConfig{ - IndexMapping: mapping.NewIndexMapping(), - IndexType: bleve.Config.DefaultIndexType, - IndexStorageType: bleve.Config.DefaultKVStore, - } - actConfig := DefaultIndexConfig() - - if !reflect.DeepEqual(expConfig, actConfig) { - t.Fatalf("expected content to see %v, saw %v", expConfig, actConfig) - } -} diff --git a/config/node_config.go b/config/node_config.go deleted file mode 100644 index 7b0b61b..0000000 --- a/config/node_config.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "encoding/json" - "fmt" - - "github.com/mosuka/blast/strutils" -) - -type NodeConfig struct { - NodeId string `json:"node_id,omitempty"` - BindAddr string `json:"bind_addr,omitempty"` - GRPCAddr string `json:"grpc_addr,omitempty"` - HTTPAddr string `json:"http_addr,omitempty"` - DataDir string `json:"data_dir,omitempty"` - RaftStorageType string `json:"raft_storage_type,omitempty"` -} - -func DefaultNodeConfig() *NodeConfig { - return &NodeConfig{ - NodeId: fmt.Sprintf("node-%s", strutils.RandStr(5)), - BindAddr: ":2000", - GRPCAddr: ":5000", - HTTPAddr: ":8000", - DataDir: "/tmp/blast", - RaftStorageType: "boltdb", - } -} - -func (c *NodeConfig) ToMap() map[string]interface{} { - b, err := json.Marshal(c) - if err != nil { - return map[string]interface{}{} - } - - var m map[string]interface{} - err = json.Unmarshal(b, &m) - if err != nil { - return map[string]interface{}{} - } - - return m -} diff --git a/config/node_config_test.go b/config/node_config_test.go deleted file mode 100644 index da69880..0000000 --- a/config/node_config_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "fmt" - "testing" - - "github.com/mosuka/blast/strutils" -) - -func TestDefaultNodeConfig(t *testing.T) { - expConfig := &NodeConfig{ - NodeId: fmt.Sprintf("node-%s", strutils.RandStr(5)), - BindAddr: ":2000", - GRPCAddr: ":5000", - HTTPAddr: ":8000", - DataDir: "/tmp/blast", - RaftStorageType: "boltdb", - } - actConfig := DefaultNodeConfig() - - if expConfig.BindAddr != actConfig.BindAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.BindAddr, actConfig.BindAddr) - } - - if expConfig.GRPCAddr != actConfig.GRPCAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.GRPCAddr, actConfig.GRPCAddr) - } - - if expConfig.HTTPAddr != actConfig.HTTPAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.HTTPAddr, actConfig.HTTPAddr) - } - - if expConfig.DataDir != actConfig.DataDir { - t.Fatalf("expected content to see %v, saw %v", expConfig.DataDir, actConfig.DataDir) - } - - if expConfig.RaftStorageType != actConfig.RaftStorageType { - t.Fatalf("expected content to see %v, saw %v", expConfig.RaftStorageType, actConfig.RaftStorageType) - } -} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go deleted file mode 100644 index f69170c..0000000 --- a/dispatcher/grpc_service.go +++ /dev/null @@ -1,936 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "context" - "errors" - "hash/fnv" - "io" - "math/rand" - "reflect" - "sort" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search" - "github.com/golang/protobuf/ptypes/any" - "github.com/hashicorp/raft" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/sortutils" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - *grpc.Service - - managerAddr string - logger *zap.Logger - - managers map[string]interface{} - managerClients map[string]*grpc.Client - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} - - indexers map[string]interface{} - indexerClients map[string]map[string]*grpc.Client - updateIndexersStopCh chan struct{} - updateIndexersDoneCh chan struct{} -} - -func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - managerAddr: managerAddr, - logger: logger, - - managers: make(map[string]interface{}, 0), - managerClients: make(map[string]*grpc.Client, 0), - - indexers: make(map[string]interface{}, 0), - indexerClients: make(map[string]map[string]*grpc.Client, 0), - }, nil -} - -func (s *GRPCService) Start() error { - s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - - s.logger.Info("start to update indexer cluster info") - go s.startUpdateIndexers(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - - s.logger.Info("stop to update indexer cluster info") - s.stopUpdateIndexers() - - return nil -} - -func (s *GRPCService) getManagerClient() (*grpc.Client, error) { - var client *grpc.Client - - for id, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("id", id)) - 
continue - } - - state, ok := nm["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() || state == raft.Follower.String() { - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { - client, err := grpc.NewClient(s.managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - managers, err := client.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return managers, nil -} - -func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.managerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for nodeId, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - 
s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // create stream - stream, err := client.WatchCluster() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // get current manager cluster - managersIntr, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if managersIntr == nil { - s.logger.Error(err.Error()) - continue - } - managers := *managersIntr.(*map[string]interface{}) - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, metadata := range managers { - mm, ok := metadata.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - grpcAddr, ok := mm["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been 
changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.managerClients, nodeId) - - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } - } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) - } - } - - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) - } - } - 
} -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { - s.updateIndexersStopCh = make(chan struct{}) - s.updateIndexersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateIndexersDoneCh) - }() - - // wait for manager available - s.logger.Info("wait for manager clients are available") - for { - if len(s.managerClients) > 0 { - s.logger.Info("manager clients are available") - break - } - time.Sleep(100 * time.Millisecond) - } - - // get active client for manager - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - - // get initial indexers - clusters, err := client.GetValue("/cluster_config/clusters/") - if err != nil { - s.logger.Error(err.Error()) - } - if clusters == nil { - s.logger.Error("nil") - } - s.indexers = *clusters.(*map[string]interface{}) - - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) - continue - } - - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) - continue - } - - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - 
s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - metadata, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", metadata)) - continue - } - - grpcAddr, ok := metadata["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(metadata["grpc_addr"].(string)) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*grpc.Client) - } - s.indexerClients[clusterId][nodeId] = client - } - } - - for { - select { - case <-s.updateIndexersStopCh: - s.logger.Info("received a request to stop updating a indexer cluster") - return - default: - client, err = s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - stream, err := client.WatchStore("/cluster_config/clusters/") - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a indexer cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Debug("data has changed", zap.String("key", resp.Key)) - - cluster, err := client.GetValue("/cluster_config/clusters/") - if err != nil { - s.logger.Error(err.Error()) - continue - } - if cluster == nil { - s.logger.Error("nil") - continue - } - indexers := *cluster.(*map[string]interface{}) - - // compare previous manager with current manager - if !reflect.DeepEqual(s.indexers, indexers) { - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := 
cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) - continue - } - - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) - continue - } - - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.indexerClients[clusterId][nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.indexerClients[clusterId], nodeId) - - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.indexerClients[clusterId][nodeId] = newClient - } - } - - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := 
grpc.NewClient(nodeConfig["grpc_addr"].(string)) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*grpc.Client) - } - s.indexerClients[clusterId][nodeId] = newClient - } - } - } - - } - } - } -} - -func (s *GRPCService) stopUpdateIndexers() { - s.logger.Info("close all indexer clients") - for clusterId, cluster := range s.indexerClients { - for id, client := range cluster { - s.logger.Debug("close indexer client", zap.String("cluster_id", clusterId), zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - } - - if s.updateIndexersStopCh != nil { - s.logger.Info("send a request to stop updating a index cluster") - close(s.updateIndexersStopCh) - } - - s.logger.Info("wait for the indexer cluster update to stop") - <-s.updateIndexersDoneCh - s.logger.Info("the indexer cluster update has been stopped") -} - -func (s *GRPCService) getIndexerClients() map[string]*grpc.Client { - indexerClients := make(map[string]*grpc.Client, 0) - - for clusterId, cluster := range s.indexerClients { - nodeIds := make([]string, 0) - for nodeId := range cluster { - nodeIds = append(nodeIds, nodeId) - } - - // pick a client at random - nodeId := nodeIds[rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(nodeIds))] - - indexerClients[clusterId] = s.indexerClients[clusterId][nodeId] - } - - return indexerClients -} - -func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - 
fields map[string]interface{} - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *grpc.Client, id string, respChan chan respVal) { - // index documents - fields, err := client.GetDocument(id) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - fields: fields, - err: err, - } - }(clusterId, client, req.Id, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - var fields map[string]interface{} - for r := range respChan { - if r.fields != nil { - fields = r.fields - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - resp := &protobuf.GetDocumentResponse{} - - fieldsAny := &any.Any{} - err := protobuf.UnmarshalAny(fields, fieldsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.Fields = fieldsAny - - return resp, nil -} - -func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - start := time.Now() - - resp := &protobuf.SearchResponse{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - searchResult *bleve.SearchResult - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - // create search request - ins, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - searchRequest := ins.(*bleve.SearchRequest) - - // change to distributed search request - from := searchRequest.From - size := searchRequest.Size - searchRequest.From = 0 - searchRequest.Size 
= from + size - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *grpc.Client, searchRequest *bleve.SearchRequest, respChan chan respVal) { - searchResult, err := client.Search(searchRequest) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - searchResult: searchResult, - err: err, - } - }(clusterId, client, searchRequest, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // revert to original search request - searchRequest.From = from - searchRequest.Size = size - - // summarize responses - var searchResult *bleve.SearchResult - for r := range respChan { - if r.searchResult != nil { - if searchResult == nil { - searchResult = r.searchResult - } else { - searchResult.Merge(r.searchResult) - } - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // handle case where no results were successful - if searchResult == nil { - searchResult = &bleve.SearchResult{ - Status: &bleve.SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - // sort all hits with the requested order - if len(searchRequest.Sort) > 0 { - sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) - sort.Sort(sorter) - } - - // now skip over the correct From - if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { - searchResult.Hits = searchResult.Hits[searchRequest.From:] - } else if searchRequest.From > 0 { - searchResult.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { - searchResult.Hits = searchResult.Hits[0:searchRequest.Size] - } - - // fix up facets - for name, fr := range searchRequest.Facets { - searchResult.Facets.Fixup(name, fr.Size) - } - - // fix up original request - searchResult.Request = searchRequest - searchDuration := time.Since(start) - searchResult.Took = 
searchDuration - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.SearchResult = searchResultAny - - return resp, nil -} - -func (s *GRPCService) docIdHash(docId string) uint64 { - hash := fnv.New64() - _, err := hash.Write([]byte(docId)) - if err != nil { - return 0 - } - - return hash.Sum64() -} - -func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - // initialize document list for each cluster - docSet := make(map[string][]map[string]interface{}, 0) - for _, clusterId := range clusterIds { - docSet[clusterId] = make([]map[string]interface{}, 0) - } - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - // fields - ins, err := protobuf.MarshalAny(req.Fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - fields := *ins.(*map[string]interface{}) - - // document - doc := map[string]interface{}{ - "id": req.Id, - "fields": fields, - } - - // distribute documents to each cluster based on document id - docIdHash := s.docIdHash(req.Id) - clusterNum := uint64(len(indexerClients)) - clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], doc) - } - - type respVal struct { - clusterId string - count int - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, docs := range docSet { - wg.Add(1) - go func(clusterId string, docs 
[]map[string]interface{}, respChan chan respVal) { - count, err := indexerClients[clusterId].IndexDocument(docs) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - count: count, - err: err, - } - }(clusterId, docs, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := 0 - for r := range respChan { - if r.count >= 0 { - totalCount += r.count - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // response - resp := &protobuf.IndexDocumentResponse{ - Count: int32(totalCount), - } - - return stream.SendAndClose(resp) -} - -func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - ids := make([]string, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - ids = append(ids, req.Id) - } - - type respVal struct { - clusterId string - count int - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *grpc.Client, ids []string, respChan chan respVal) { - // index documents - count, err := client.DeleteDocument(ids) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - count: count, - err: err, - } - }(clusterId, client, ids, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := len(ids) - for r := range respChan { - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", 
r.clusterId)) - } - } - - // response - resp := &protobuf.DeleteDocumentResponse{ - Count: int32(totalCount), - } - - return stream.SendAndClose(resp) -} diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go deleted file mode 100644 index 8ce7744..0000000 --- a/dispatcher/http_handler.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/blevesearch/bleve" - "github.com/gorilla/mux" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) - if err != nil { - return nil, err - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - 
router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type GetHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - fields, err := h.client.GetDocument(vars["id"]) - if err != nil { - switch err { - case errors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // map[string]interface{} -> bytes - content, err = json.MarshalIndent(fields, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := 
map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewSetDocumentHandler(client *grpc.Client, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]map[string]interface{}, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - // Indexing documents in bulk - err := json.Unmarshal(bodyBytes, &docs) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } else { - // Indexing a document - var fields map[string]interface{} - err := json.Unmarshal(bodyBytes, &fields) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - 
blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - 
} - - if id == "" { - // Deleting documents in bulk - err := json.Unmarshal(bodyBytes, &ids) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } -} - -type SearchHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = 
blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/dispatcher/server.go b/dispatcher/server.go deleted file mode 100644 index b25debb..0000000 --- a/dispatcher/server.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" - "go.uber.org/zap" -) - -type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server -} - -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // create gRPC service - s.grpcService, err = NewGRPCService(s.clusterConfig.ManagerAddr, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - 
s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go deleted file mode 100644 index 58c69cc..0000000 --- a/dispatcher/server_test.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // - // manager - // - // create cluster config - managerClusterConfig1 := config.DefaultClusterConfig() - // create node config - managerNodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig1.DataDir) - }() - // create manager - manager1, err := manager.NewServer(managerClusterConfig1, managerNodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger.Named("manager1"), httpAccessLogger) - defer func() { - if manager1 != nil { - manager1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start manager - manager1.Start() - // sleep - time.Sleep(5 * time.Second) - - // create cluster config - managerClusterConfig2 := config.DefaultClusterConfig() - managerClusterConfig2.PeerAddr = managerNodeConfig1.GRPCAddr - // create node config - managerNodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig2.DataDir) - }() - // create manager - manager2, err := manager.NewServer(managerClusterConfig2, managerNodeConfig2, indexConfig, logger.Named("manager2"), grpcLogger.Named("manager2"), 
httpAccessLogger) - defer func() { - if manager2 != nil { - manager2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start manager - manager2.Start() - // sleep - time.Sleep(5 * time.Second) - - // create cluster config - managerClusterConfig3 := config.DefaultClusterConfig() - managerClusterConfig3.PeerAddr = managerNodeConfig1.GRPCAddr - // create node config - managerNodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig3.DataDir) - }() - // create manager - manager3, err := manager.NewServer(managerClusterConfig3, managerNodeConfig3, indexConfig, logger.Named("manager3"), grpcLogger.Named("manager3"), httpAccessLogger) - defer func() { - if manager3 != nil { - manager3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start manager - manager3.Start() - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - managerClient1, err := grpc.NewClient(managerNodeConfig1.GRPCAddr) - defer func() { - _ = managerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - managerCluster1, err := managerClient1.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expManagerCluster1 := map[string]interface{}{ - managerNodeConfig1.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - managerNodeConfig2.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - managerNodeConfig3.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actManagerCluster1 := managerCluster1 - expManagerNodeConfig1 := expManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig1 := actManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if 
!reflect.DeepEqual(expManagerNodeConfig1, actManagerNodeConfig1) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig1, actManagerNodeConfig1) - } - actManagerState1 := actManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState1 && raft.Follower.String() != actManagerState1 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actManagerState1) - } - expManagerNodeConfig2 := expManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig2 := actManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expManagerNodeConfig2, actManagerNodeConfig2) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig2, actManagerNodeConfig2) - } - actManagerState2 := actManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState2 && raft.Follower.String() != actManagerState2 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actManagerState2) - } - expManagerNodeConfig3 := expManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig3 := actManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expManagerNodeConfig3, actManagerNodeConfig3) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig3, actManagerNodeConfig3) - } - actManagerState3 := actManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState3 && raft.Follower.String() != actManagerState3 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), 
raft.Follower.String(), actManagerState3) - } - - // - // indexer cluster1 - // - // create cluster config - indexerClusterConfig1 := config.DefaultClusterConfig() - indexerClusterConfig1.ManagerAddr = managerNodeConfig1.GRPCAddr - indexerClusterConfig1.ClusterId = "cluster1" - // create node config - indexerNodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig1.DataDir) - }() - indexer1, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig1, config.DefaultIndexConfig(), logger.Named("indexer1"), grpcLogger.Named("indexer1"), httpAccessLogger) - defer func() { - if indexer1 != nil { - indexer1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer1.Start() - // sleep - time.Sleep(5 * time.Second) - - // create node config - indexerNodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig2.DataDir) - }() - indexer2, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig2, config.DefaultIndexConfig(), logger.Named("indexer2"), grpcLogger.Named("indexer2"), httpAccessLogger) - defer func() { - if indexer2 != nil { - indexer2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer2.Start() - // sleep - time.Sleep(5 * time.Second) - - // create node config - indexerNodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig3.DataDir) - }() - indexer3, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig3, config.DefaultIndexConfig(), logger.Named("indexer3"), grpcLogger.Named("indexer3"), httpAccessLogger) - defer func() { - if indexer3 != nil { - indexer3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer3.Start() - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - indexerClient1, err := grpc.NewClient(indexerNodeConfig1.GRPCAddr) - defer func() { - _ = indexerClient1.Close() - }() - if err != 
nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - indexerCluster1, err := indexerClient1.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster1 := map[string]interface{}{ - indexerNodeConfig1.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig2.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig3.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actIndexerCluster1 := indexerCluster1 - expIndexerNodeConfig1 := expIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig1, actIndexerNodeConfig1) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig1, actIndexerNodeConfig1) - } - actIndexerState1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState1 && raft.Follower.String() != actIndexerState1 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState1) - } - expIndexerNodeConfig2 := expIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig2 := actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig2, actIndexerNodeConfig2) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig2, actIndexerNodeConfig2) - } - actIndexerState2 := 
actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState2 && raft.Follower.String() != actIndexerState2 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState2) - } - expIndexerNodeConfig3 := expIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig3, actIndexerNodeConfig3) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig3, actIndexerNodeConfig3) - } - actIndexerState3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState3 && raft.Follower.String() != actIndexerState3 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState3) - } - - // - // indexer cluster2 - // - // create cluster config - indexerClusterConfig2 := config.DefaultClusterConfig() - indexerClusterConfig2.ManagerAddr = managerNodeConfig1.GRPCAddr - indexerClusterConfig2.ClusterId = "cluster2" - // create node config - indexerNodeConfig4 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig4.DataDir) - }() - indexer4, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig4, config.DefaultIndexConfig(), logger.Named("indexer4"), grpcLogger.Named("indexer4"), httpAccessLogger) - defer func() { - if indexer4 != nil { - indexer4.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer4.Start() - // sleep - time.Sleep(5 * time.Second) - - // create node config - indexerNodeConfig5 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig5.DataDir) - }() - indexer5, err := 
indexer.NewServer(indexerClusterConfig2, indexerNodeConfig5, config.DefaultIndexConfig(), logger.Named("indexer5"), grpcLogger.Named("indexer5"), httpAccessLogger) - defer func() { - if indexer5 != nil { - indexer5.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer5.Start() - // sleep - time.Sleep(5 * time.Second) - - // create node config - indexerNodeConfig6 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(indexerNodeConfig6.DataDir) - }() - indexer6, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig6, config.DefaultIndexConfig(), logger.Named("indexer6"), grpcLogger.Named("indexer6"), httpAccessLogger) - defer func() { - if indexer6 != nil { - indexer6.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - indexer6.Start() - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - indexerClient2, err := grpc.NewClient(indexerNodeConfig4.GRPCAddr) - defer func() { - _ = indexerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - indexerCluster2, err := indexerClient2.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster2 := map[string]interface{}{ - indexerNodeConfig4.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig4.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig5.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig5.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig6.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig6.ToMap(), - "state": raft.Follower.String(), - }, - } - actIndexerCluster2 := indexerCluster2 - expIndexerNodeConfig4 := expIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if 
!reflect.DeepEqual(expIndexerNodeConfig4, actIndexerNodeConfig4) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig4, actIndexerNodeConfig4) - } - actIndexerState4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState4 && raft.Follower.String() != actIndexerState4 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState4) - } - expIndexerNodeConfig5 := expIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig5, actIndexerNodeConfig5) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig5, actIndexerNodeConfig5) - } - actIndexerState5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState5 && raft.Follower.String() != actIndexerState5 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState5) - } - expIndexerNodeConfig6 := expIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig6, actIndexerNodeConfig6) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig6, actIndexerNodeConfig6) - } - actIndexerState6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState6 && raft.Follower.String() != actIndexerState6 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), 
raft.Follower.String(), actIndexerState6) - } - - // - // dispatcher - // - // create cluster config - dispatcherClusterConfig1 := config.DefaultClusterConfig() - dispatcherClusterConfig1.ManagerAddr = managerNodeConfig1.GRPCAddr - // create node config - dispatcherNodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(dispatcherNodeConfig.DataDir) - }() - dispatcher1, err := NewServer(dispatcherClusterConfig1, dispatcherNodeConfig, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) - defer func() { - dispatcher1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - dispatcher1.Start() - - // sleep - time.Sleep(5 * time.Second) -} diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100755 index 1cf687a..0000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -exec "$@" diff --git a/errors/errors.go b/errors/errors.go index cc538f6..fcdf16f 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -1,23 +1,15 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package errors import "errors" var ( - ErrNotFoundLeader = errors.New("does not found leader") - ErrNotFound = errors.New("not found") - ErrTimeout = errors.New("timeout") + ErrNotFoundLeader = errors.New("does not found leader") + ErrNodeAlreadyExists = errors.New("node already exists") + ErrNodeDoesNotExist = errors.New("node does not exist") + ErrNodeNotReady = errors.New("node not ready") + ErrNotFound = errors.New("not found") + ErrTimeout = errors.New("timeout") + ErrNoUpdate = errors.New("no update") + ErrNil = errors.New("data is nil") + ErrUnsupportedEvent = errors.New("unsupported event") ) diff --git a/etc/blast.yaml b/etc/blast.yaml new file mode 100644 index 0000000..a03e89a --- /dev/null +++ b/etc/blast.yaml @@ -0,0 +1,43 @@ +# +# General +# +id: "node1" +raft_address: ":7000" +grpc_address: ":9000" +http_address: ":8000" +data_directory: "/tmp/blast/node1/data" +#mapping_file: "./etc/blast_mapping.json" +peer_grpc_address: "" + +# +# TLS +# +#certificate_file: "./etc/blast-cert.pem" +#key_file: "./etc/blast-key.pem" +#common_name: "localhost" + +# +# CORS +# +#cors_allowed_methods: [ +# "GET", +# "PUT", +# "DELETE", +# "POST" +#] +#cors_allowed_origins: [ +# "http://localhost:8080" +#] +#cors_allowed_headers: [ +# "content-type" +#] + +# +# Logging +# +log_level: "INFO" +log_file: "" +#log_max_size: 500 +#log_max_backups: 3 +#log_max_age: 30 +#log_compress: false diff --git a/etc/blast_mapping.json b/etc/blast_mapping.json new file mode 100644 index 0000000..118348c --- /dev/null +++ b/etc/blast_mapping.json @@ -0,0 +1,103 @@ +{ + 
"types": { + "example": { + "enabled": true, + "dynamic": true, + "properties": { + "title": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "text": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "url": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + }, + "timestamp": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "datetime", + "store": true, + "index": true, + "include_in_all": true + } + ], + "default_analyzer": "" + }, + "_type": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + } + }, + "default_analyzer": "en" + } + }, + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } +} diff --git a/example/geo_doc1.json b/example/geo_doc1.json deleted file mode 100644 index e94b319..0000000 --- a/example/geo_doc1.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "Brewpub-on-the-Green", - "city": "Fremont", - "state": "California", - "code": "", - 
"country": "United States", - "phone": "", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [], - "geo": { - "accuracy": "APPROXIMATE", - "lat": 37.5483, - "lon": -121.989 - } -} diff --git a/example/geo_doc2.json b/example/geo_doc2.json deleted file mode 100644 index 9ba8bfd..0000000 --- a/example/geo_doc2.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Capital City Brewing Company", - "city": "Washington", - "state": "District of Columbia", - "code": "20005", - "country": "United States", - "phone": "202.628.2222", - "website": "http://www.capcitybrew.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.", - "address": [ - "1100 New York Ave, NW" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.8999, - "lon": -77.0272 - } -} diff --git a/example/geo_doc3.json b/example/geo_doc3.json deleted file mode 100644 index 008a467..0000000 --- a/example/geo_doc3.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Firehouse Grill & Brewery", - "city": "Sunnyvale", - "state": "California", - "code": "94086", - "country": "United States", - "phone": "1-408-773-9500", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "111 South Murphy Avenue" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 37.3775, - "lon": -122.03 - } -} diff --git a/example/geo_doc4.json b/example/geo_doc4.json deleted file mode 100644 index a9655d4..0000000 --- a/example/geo_doc4.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Hook & Ladder Brewing Company", - "city": "Silver Spring", - "state": 
"Maryland", - "code": "20910", - "country": "United States", - "phone": "301.565.4522", - "website": "http://www.hookandladderbeer.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.", - "address": [ - "8113 Fenton St." - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.9911, - "lon": -77.0237 - } -} diff --git a/example/geo_doc5.json b/example/geo_doc5.json deleted file mode 100644 index 24e07b0..0000000 --- a/example/geo_doc5.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Jack's Brewing", - "city": "Fremont", - "state": "California", - "code": "94538", - "country": "United States", - "phone": "1-510-796-2036", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "39176 Argonaut Way" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 37.5441, - "lon": -121.988 - } -} diff --git a/example/geo_doc6.json b/example/geo_doc6.json deleted file mode 100644 index 3c24f34..0000000 --- a/example/geo_doc6.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Sweet Water Tavern and Brewery", - "city": "Sterling", - "state": "Virginia", - "code": "20121", - "country": "United States", - "phone": "(703) 449-1108", - "website": "http://www.greatamericanrestaurants.com/sweetMainSter/index.htm", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "45980 Waterview Plaza" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", 
- "lat": 39.0324, - "lon": -77.4097 - } -} diff --git a/example/geo_search_request.json b/example/geo_search_request.json deleted file mode 100644 index f49261b..0000000 --- a/example/geo_search_request.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "query": { - "location": { - "lon": -122.107799, - "lat": 37.399285 - }, - "distance": "100mi", - "field": "geo" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - { - "by": "geo_distance", - "field": "geo", - "unit": "mi", - "location": { - "lon": -122.107799, - "lat": 37.399285 - } - } - ], - "facets": { - "State count": { - "size": 10, - "field": "state" - }, - "Updated range": { - "size": 10, - "field": "updated", - "date_ranges": [ - { - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z", - "end": "2010-12-31T23:59:59Z" - }, - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z" - } - ] - } - }, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - } -} diff --git a/example/wiki_bulk_delete.json b/example/wiki_bulk_delete.json deleted file mode 100644 index 3f17c76..0000000 --- a/example/wiki_bulk_delete.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - "arwiki_1", - "bgwiki_1", - "cawiki_1", - "zhwiki_1" -] diff --git a/example/wiki_bulk_index.json b/example/wiki_bulk_index.json deleted file mode 100644 index 42af51b..0000000 --- a/example/wiki_bulk_index.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "id": "arwiki_1", - "fields": { - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. 
تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" - } - }, - { - "id": "bgwiki_1", - "fields": { - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" - } - }, - { - "id": "cawiki_1", - "fields": { - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. 
A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" - } - }, - { - "id": "zhwiki_1", - "fields": { - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" - } - } -] diff --git a/example/wiki_doc_arwiki_1.json b/example/wiki_doc_arwiki_1.json deleted file mode 100644 index 8b766f0..0000000 --- a/example/wiki_doc_arwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" -} diff --git a/example/wiki_doc_bgwiki_1.json b/example/wiki_doc_bgwiki_1.json deleted file mode 100644 index 0b585be..0000000 --- a/example/wiki_doc_bgwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. 
Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" -} diff --git a/example/wiki_doc_cawiki_1.json b/example/wiki_doc_cawiki_1.json deleted file mode 100644 index 119c247..0000000 --- a/example/wiki_doc_cawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. 
Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" -} diff --git a/example/wiki_doc_cswiki_1.json b/example/wiki_doc_cswiki_1.json deleted file mode 100644 index 1f222ef..0000000 --- a/example/wiki_doc_cswiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_cs": "Vyhledávač", - "text_cs": "Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.", - "timestamp": "2017-11-10T21:59:00Z", - "_type": "cswiki" -} diff --git a/example/wiki_doc_dawiki_1.json b/example/wiki_doc_dawiki_1.json deleted file mode 100644 index e38abbb..0000000 --- a/example/wiki_doc_dawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_da": "Søgemaskine", - "text_da": "En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dawiki" -} diff --git a/example/wiki_doc_dewiki_1.json b/example/wiki_doc_dewiki_1.json deleted file mode 100644 index ffeb346..0000000 --- a/example/wiki_doc_dewiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_de": "Suchmaschine", - "text_de": "Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dewiki" -} diff --git a/example/wiki_doc_elwiki_1.json b/example/wiki_doc_elwiki_1.json deleted file mode 100644 index b4eb58e..0000000 --- a/example/wiki_doc_elwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_el": "Μηχανή αναζήτησης", - "text_el": "Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. 
Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.", - "timestamp": "2017-11-21T19:57:00Z", - "_type": "elwiki" -} diff --git a/example/wiki_doc_enwiki_1.json b/example/wiki_doc_enwiki_1.json deleted file mode 100644 index 0173803..0000000 --- a/example/wiki_doc_enwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" -} diff --git a/example/wiki_doc_eswiki_1.json b/example/wiki_doc_eswiki_1.json deleted file mode 100644 index d1747f8..0000000 --- a/example/wiki_doc_eswiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_es": "Motor de búsqueda", - "text_es": "Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. 
Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.", - "timestamp": "2018-08-30T11:30:00Z", - "_type": "eswiki" -} diff --git a/example/wiki_doc_fawiki_1.json b/example/wiki_doc_fawiki_1.json deleted file mode 100644 index 1457b00..0000000 --- a/example/wiki_doc_fawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_fa": "موتور جستجو (پردازش)", - "text_fa": "موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.", - "timestamp": "2017-01-06T02:46:00Z", - "_type": "fawiki" -} diff --git a/example/wiki_doc_fiwiki_1.json b/example/wiki_doc_fiwiki_1.json deleted file mode 100644 index 78d6861..0000000 --- a/example/wiki_doc_fiwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_fi": "Hakukone", - "text_fi": "Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.", - "timestamp": "2017-10-04T14:33:00Z", - "_type": "fiwiki" -} diff --git a/example/wiki_doc_frwiki_1.json b/example/wiki_doc_frwiki_1.json deleted file mode 100644 index f90c893..0000000 --- a/example/wiki_doc_frwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_fr": "Moteur de recherche", - "text_fr": "Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. 
Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. 
On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.", - "timestamp": "2018-05-30T15:15:00Z", - "_type": "frwiki" -} diff --git a/example/wiki_doc_gawiki_1.json b/example/wiki_doc_gawiki_1.json deleted file mode 100644 index 492dc58..0000000 --- a/example/wiki_doc_gawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ga": "Inneall cuardaigh", - "text_ga": "Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.", - "timestamp": "2013-10-27T18:17:00Z", - "_type": "gawiki" -} diff --git a/example/wiki_doc_glwiki_1.json b/example/wiki_doc_glwiki_1.json deleted file mode 100644 index 8d1e981..0000000 --- a/example/wiki_doc_glwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_gl": "Motor de busca", - "text_gl": "Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. 
As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.", - "timestamp": "2016-10-31T13:33:00Z", - "_type": "glwiki" -} diff --git a/example/wiki_doc_guwiki_1.json b/example/wiki_doc_guwiki_1.json deleted file mode 100644 index eb0cffd..0000000 --- a/example/wiki_doc_guwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_gu": "વેબ શોધ એન્જીન", - "text_gu": "વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.", - "timestamp": "2013-04-04T19:28:00Z", - "_type": "guwiki" -} diff --git a/example/wiki_doc_hiwiki_1.json b/example/wiki_doc_hiwiki_1.json deleted file mode 100644 index 59456f8..0000000 --- a/example/wiki_doc_hiwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_hi": "खोज इंजन", - "text_hi": "ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।", - "timestamp": "2017-10-19T20:09:00Z", - "_type": "hiwiki" -} diff --git a/example/wiki_doc_huwiki_1.json 
b/example/wiki_doc_huwiki_1.json deleted file mode 100644 index d2595ec..0000000 --- a/example/wiki_doc_huwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_hu": "Keresőmotor", - "text_hu": "A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.", - "timestamp": "2018-05-15T20:40:00Z", - "_type": "huwiki" -} diff --git a/example/wiki_doc_hywiki_1.json b/example/wiki_doc_hywiki_1.json deleted file mode 100644 index 4d9e1a2..0000000 --- a/example/wiki_doc_hywiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_hy": "Որոնողական համակարգ", - "text_hy": "Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով 
իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "hywiki" -} diff --git a/example/wiki_doc_idwiki_1.json b/example/wiki_doc_idwiki_1.json deleted file mode 100644 index 262ebeb..0000000 --- a/example/wiki_doc_idwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_id": "Mesin pencari web", - "text_id": "Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. 
Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "idwiki" -} diff --git a/example/wiki_doc_itwiki_1.json b/example/wiki_doc_itwiki_1.json deleted file mode 100644 index c58fbfa..0000000 --- a/example/wiki_doc_itwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_it": "Motore di ricerca", - "text_it": "Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. 
I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.", - "timestamp": "2018-07-16T12:20:00Z", - "_type": "itwiki" -} diff --git a/example/wiki_doc_jawiki_1.json b/example/wiki_doc_jawiki_1.json deleted file mode 100644 index db74184..0000000 --- a/example/wiki_doc_jawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ja": "検索エンジン", - "text_ja": "検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。", - "timestamp": "2018-05-30T00:52:00Z", - "_type": "jawiki" -} diff --git a/example/wiki_doc_knwiki_1.json b/example/wiki_doc_knwiki_1.json deleted file mode 100644 index cdd3ac0..0000000 --- a/example/wiki_doc_knwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ", - "text_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. 
ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.", - "timestamp": "2017-10-03T14:13:00Z", - "_type": "knwiki" -} diff --git a/example/wiki_doc_kowiki_1.json b/example/wiki_doc_kowiki_1.json deleted file mode 100644 index 57ff513..0000000 --- a/example/wiki_doc_kowiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_cjk": "검색 엔진", - "text_cjk": "검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.", - "timestamp": "2017-11-19T12:50:00Z", - "_type": "kowiki" -} diff --git a/example/wiki_doc_mlwiki_1.json b/example/wiki_doc_mlwiki_1.json deleted file mode 100644 index d172ff4..0000000 --- a/example/wiki_doc_mlwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ml": "വെബ് സെർച്ച് എഞ്ചിൻ", - "text_ml": "വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.", - "timestamp": "2010-05-05T15:06:00Z", - "_type": "mlwiki" -} diff --git a/example/wiki_doc_nlwiki_1.json b/example/wiki_doc_nlwiki_1.json deleted file mode 100644 index d2ada6a..0000000 --- a/example/wiki_doc_nlwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_nl": "Zoekmachine", - "text_nl": "Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. 
In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.", - "timestamp": "2018-05-07T11:05:00Z", - "_type": "nlwiki" -} diff --git a/example/wiki_doc_nowiki_1.json b/example/wiki_doc_nowiki_1.json deleted file mode 100644 index 0b01a24..0000000 --- a/example/wiki_doc_nowiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_no": "Søkemotor", - "text_no": "En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.", - "timestamp": "2018-02-05T14:15:00Z", - "_type": "nowiki" -} diff --git a/example/wiki_doc_pswiki_1.json b/example/wiki_doc_pswiki_1.json deleted file mode 100644 index b0ba67f..0000000 --- a/example/wiki_doc_pswiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ps": "انټرنټ لټوونکی ماشين", - "text_ps": "نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. 
حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. 
سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.", - "timestamp": "2015-12-15T18:53:00Z", - "_type": "pswiki" -} diff --git a/example/wiki_doc_ptwiki_1.json b/example/wiki_doc_ptwiki_1.json deleted file mode 100644 index 8fb25c4..0000000 --- a/example/wiki_doc_ptwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_pt": "Motor de busca", - "text_pt": "Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).", - "timestamp": "2017-11-09T14:38:00Z", - "_type": "ptwiki" -} diff --git a/example/wiki_doc_rowiki_1.json b/example/wiki_doc_rowiki_1.json deleted file mode 100644 index ca80608..0000000 --- a/example/wiki_doc_rowiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ro": "Motor de căutare", - "text_ro": "Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).", - "timestamp": "2018-06-12T08:59:00Z", - "_type": "rowiki" -} diff --git a/example/wiki_doc_ruwiki_1.json b/example/wiki_doc_ruwiki_1.json deleted file mode 100644 index 3733d50..0000000 --- a/example/wiki_doc_ruwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ru": "Поисковая машина", - "text_ru": "Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. 
уместность результата), полнота индекса, учёт морфологии языка.", - "timestamp": "2017-03-22T01:16:00Z", - "_type": "ruwiki" -} diff --git a/example/wiki_doc_svwiki_1.json b/example/wiki_doc_svwiki_1.json deleted file mode 100644 index 43f56cb..0000000 --- a/example/wiki_doc_svwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_sv": "Söktjänst", - "text_sv": "En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.", - "timestamp": "2018-08-16T22:13:00Z", - "_type": "svwiki" -} diff --git a/example/wiki_doc_tawiki_1.json b/example/wiki_doc_tawiki_1.json deleted file mode 100644 index 5f46729..0000000 --- a/example/wiki_doc_tawiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_ta": "தேடுபொறி", - "text_ta": "தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. 
தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.", - "timestamp": "2017-12-24T10:30:00Z", - "_type": "tawiki" -} diff --git a/example/wiki_doc_tewiki_1.json b/example/wiki_doc_tewiki_1.json deleted file mode 100644 index b014c8f..0000000 --- a/example/wiki_doc_tewiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_te": "వెబ్ శోధనా యంత్రం", - "text_te": "వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.", - "timestamp": "2017-06-19T11:22:00Z", - "_type": "tewiki" -} diff --git a/example/wiki_doc_thwiki_1.json b/example/wiki_doc_thwiki_1.json deleted file mode 100644 index 81a233f..0000000 --- a/example/wiki_doc_thwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_th": "เสิร์ชเอนจิน", - "text_th": "เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. 
เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป", - "timestamp": "2016-06-18T11:06:00Z", - "_type": "thwiki" -} diff --git a/example/wiki_doc_trwiki_1.json b/example/wiki_doc_trwiki_1.json deleted file mode 100644 index bedbd13..0000000 --- a/example/wiki_doc_trwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_tr": "Arama motoru", - "text_tr": "Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.", - "timestamp": "2018-03-13T17:37:00Z", - "_type": "trwiki" -} diff --git a/example/wiki_doc_zhwiki_1.json b/example/wiki_doc_zhwiki_1.json deleted file mode 100644 index f997795..0000000 --- a/example/wiki_doc_zhwiki_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" -} diff --git a/example/wiki_search_request.json b/example/wiki_search_request.json deleted file mode 100644 index c189f9f..0000000 --- a/example/wiki_search_request.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "facets": { - "Type count": { - "size": 10, - "field": "_type" - }, - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z", - "end": "2010-12-31T23:59:59Z" - }, - { - "name": "2011 - 2020", - 
"start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z" - } - ] - } - }, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - } -} diff --git a/example/wiki_search_request_simple.json b/example/wiki_search_request_simple.json deleted file mode 100644 index 9ed3040..0000000 --- a/example/wiki_search_request_simple.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "query": { - "query": "+text_en:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score" - ] -} diff --git a/examples/example_bulk_delete.txt b/examples/example_bulk_delete.txt new file mode 100644 index 0000000..3bb459b --- /dev/null +++ b/examples/example_bulk_delete.txt @@ -0,0 +1,11 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 diff --git a/examples/example_bulk_index.json b/examples/example_bulk_index.json new file mode 100644 index 0000000..fab37a7 --- /dev/null +++ b/examples/example_bulk_index.json @@ -0,0 +1,11 @@ +{"id": "1","fields": {"title": "Blast", "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "2","fields": {"title": "Bleve", "text": "Bleve is a modern text indexing library for go.", "timestamp": "2019-10-30T16:13:00Z", "_type": "example"}} +{"id": "3","fields": {"title": "Riot", "text": "Riot is Go Open Source, Distributed, Simple and efficient full text search engine.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "4","fields": {"title": "Bayard", "text": "Bayard is a full text search and indexing server, written in Rust, built on top of Tantivy.", "timestamp": "2019-12-19T10:41:00Z", "_type": "example"}} +{"id": "5","fields": {"title": "Toshi", "text": "Toshi is meant to be a full-text search engine similar to Elasticsearch. 
Toshi strives to be to Elasticsearch what Tantivy is to Lucene.", "timestamp": "2019-12-02T04:00:00Z", "_type": "example"}} +{"id": "6","fields": {"title": "Tantivy", "text": "Tantivy is a full-text search engine library inspired by Apache Lucene and written in Rust.", "timestamp": "2019-12-19T10:07:00Z", "_type": "example"}} +{"id": "7","fields": {"title": "Sonic", "text": "Sonic is a fast, lightweight and schema-less search backend.", "timestamp": "2019-12-10T23:13:00Z", "_type": "example"}} +{"id": "8","fields": {"title": "Apache Solr", "text": "Solr is highly reliable, scalable and fault tolerant, providing distributed indexing, replication and load-balanced querying, automated failover and recovery, centralized configuration and more.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "9","fields": {"title": "Elasticsearch", "text": "Elasticsearch is a distributed, open source search and analytics engine for all types of data, including textual, numerical, geospatial, structured, and unstructured.", "timestamp": "2019-12-19T08:19:00Z", "_type": "example"}} +{"id": "10","fields": {"title": "Lucene", "text": "Apache Lucene is a high-performance, full-featured text search engine library written entirely in Java.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "11","fields": {"title": "Whoosh", "text": "Whoosh is a fast, pure Python search engine library.", "timestamp": "2019-10-08T05:30:26Z", "_type": "example"}} diff --git a/examples/example_doc_1.json b/examples/example_doc_1.json new file mode 100644 index 0000000..09f6cad --- /dev/null +++ b/examples/example_doc_1.json @@ -0,0 +1,8 @@ +{ + "fields": { + "title": "Blast", + "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", + "timestamp": "2019-12-16T07:12:00Z", + "_type": "example" + } +} diff --git a/examples/example_mapping.json b/examples/example_mapping.json new file mode 100644 index 0000000..118348c --- /dev/null +++ 
b/examples/example_mapping.json @@ -0,0 +1,103 @@ +{ + "types": { + "example": { + "enabled": true, + "dynamic": true, + "properties": { + "title": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "text": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "url": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + }, + "timestamp": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "datetime", + "store": true, + "index": true, + "include_in_all": true + } + ], + "default_analyzer": "" + }, + "_type": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + } + }, + "default_analyzer": "en" + } + }, + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } +} diff --git a/examples/example_search_request.json b/examples/example_search_request.json new file mode 100644 index 0000000..3566d99 --- /dev/null +++ b/examples/example_search_request.json @@ -0,0 +1,46 @@ +{ + 
"search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score", + "_id", + "-timestamp" + ], + "facets": { + "Type count": { + "size": 10, + "field": "_type" + }, + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ + { + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z", + "end": "2010-12-31T23:59:59Z" + }, + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z" + } + ] + } + }, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" + ] + } + } +} diff --git a/examples/example_search_request_prefix.json b/examples/example_search_request_prefix.json new file mode 100644 index 0000000..0de0b37 --- /dev/null +++ b/examples/example_search_request_prefix.json @@ -0,0 +1,16 @@ +{ + "search_request": { + "query": { + "prefix": "searc", + "field": "title_en" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } +} diff --git a/examples/example_search_request_simple.json b/examples/example_search_request_simple.json new file mode 100644 index 0000000..39a3e93 --- /dev/null +++ b/examples/example_search_request_simple.json @@ -0,0 +1,15 @@ +{ + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } +} diff --git a/examples/geo_example_bulk_index.json b/examples/geo_example_bulk_index.json new file mode 100644 index 0000000..fbcbad7 --- /dev/null +++ b/examples/geo_example_bulk_index.json @@ -0,0 +1,6 @@ +{"id":"1","fields":{"name":"Brewpub-on-the-Green","city":"Fremont","state":"California","code":"","country":"United States","phone":"","website":"","updated":"2010-07-22 20:00:20","description":"","address":[],"geo":{"accuracy":"APPROXIMATE","lat":37.5483,"lon":-121.989},"_type":"geo_example"}} +{"id":"2","fields":{"name":"Capital City Brewing 
Company","city":"Washington","state":"District of Columbia","code":"20005","country":"United States","phone":"202.628.2222","website":"http://www.capcitybrew.com","updated":"2010-07-22 20:00:20","description":"Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.","address":["1100 New York Ave, NW"],"geo":{"accuracy":"ROOFTOP","lat":38.8999,"lon":-77.0272},"_type":"geo_example"}} +{"id":"3","fields":{"name":"Firehouse Grill & Brewery","city":"Sunnyvale","state":"California","code":"94086","country":"United States","phone":"1-408-773-9500","website":"","updated":"2010-07-22 20:00:20","description":"","address":["111 South Murphy Avenue"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":37.3775,"lon":-122.03},"_type":"geo_example"}} +{"id":"4","fields":{"name":"Hook & Ladder Brewing Company","city":"Silver Spring","state":"Maryland","code":"20910","country":"United States","phone":"301.565.4522","website":"http://www.hookandladderbeer.com","updated":"2010-07-22 20:00:20","description":"At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. 
Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.","address":["8113 Fenton St."],"geo":{"accuracy":"ROOFTOP","lat":38.9911,"lon":-77.0237},"_type":"geo_example"}} +{"id":"5","fields":{"name":"Jack's Brewing","city":"Fremont","state":"California","code":"94538","country":"United States","phone":"1-510-796-2036","website":"","updated":"2010-07-22 20:00:20","description":"","address":["39176 Argonaut Way"],"geo":{"accuracy":"ROOFTOP","lat":37.5441,"lon":-121.988},"_type":"geo_example"}} +{"id":"6","fields":{"name":"Sweet Water Tavern and Brewery","city":"Sterling","state":"Virginia","code":"20121","country":"United States","phone":"(703) 449-1108","website":"http://www.greatamericanrestaurants.com/sweetMainSter/index.htm","updated":"2010-07-22 20:00:20","description":"","address":["45980 Waterview Plaza"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":39.0324,"lon":-77.4097},"_type":"geo_example"}} diff --git a/examples/geo_example_doc_1.json b/examples/geo_example_doc_1.json new file mode 100644 index 0000000..c359461 --- /dev/null +++ b/examples/geo_example_doc_1.json @@ -0,0 +1,20 @@ +{ + "fields": { + "name": "Brewpub-on-the-Green", + "city": "Fremont", + "state": "California", + "code": "", + "country": "United States", + "phone": "", + "website": "", + "updated": "2010-07-22 20:00:20", + "description": "", + "address": [], + "geo": { + "accuracy": "APPROXIMATE", + "lat": 37.5483, + "lon": -121.989 + }, + "_type": "geo_example" + } +} diff --git a/example/geo_index_mapping.json b/examples/geo_example_mapping.json similarity index 60% rename from example/geo_index_mapping.json rename to examples/geo_example_mapping.json index f067367..ba7769e 100644 --- a/example/geo_index_mapping.json +++ b/examples/geo_example_mapping.json @@ -1,6 +1,6 @@ { "types": { - "brewery": { + "geo_example": { "properties": { "name": { 
"fields": [ @@ -32,5 +32,23 @@ } } }, - "default_type": "brewery" + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } } diff --git a/examples/geo_example_search_request.json b/examples/geo_example_search_request.json new file mode 100644 index 0000000..2883245 --- /dev/null +++ b/examples/geo_example_search_request.json @@ -0,0 +1,28 @@ +{ + "search_request": { + "query": { + "location": { + "lon": -122.107799, + "lat": 37.399285 + }, + "distance": "100mi", + "field": "geo" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + { + "by": "geo_distance", + "field": "geo", + "unit": "mi", + "location": { + "lon": -122.107799, + "lat": 37.399285 + } + } + ] + } +} diff --git a/examples/multiple_type_example_bulk_index.json b/examples/multiple_type_example_bulk_index.json new file mode 100644 index 0000000..b8ab6ff --- /dev/null +++ b/examples/multiple_type_example_bulk_index.json @@ -0,0 +1,36 @@ +{"id":"ar_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"ar"}} +{"id":"bg_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. 
Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bg"}} +{"id":"ca_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. 
Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"ca"}} +{"id":"cs_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cs"}} +{"id":"da_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"da"}} +{"id":"de_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. 
Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"de"}} +{"id":"el_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"el"}} +{"id":"en_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"en"}} +{"id":"es_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) 
cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"es"}} +{"id":"fa_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fa"}} +{"id":"fi_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fi"}} +{"id":"fr_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. 
Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"fr"}} +{"id":"ga_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. 
Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"ga"}} +{"id":"gl_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"gl"}} +{"id":"gu_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. 
વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"gu"}} +{"id":"hi_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hi"}} +{"id":"hu_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"hu"}} +{"id":"hy_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են 
տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hy"}} +{"id":"id_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"id"}} +{"id":"it_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"it"}} +{"id":"ja_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"ja"}} +{"id":"kn_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"kn"}} +{"id":"ko_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"ko"}} +{"id":"ml_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"ml"}} +{"id":"nl_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nl"}} +{"id":"no_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"no"}} +{"id":"ps_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"ps"}} +{"id":"pt_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"pt"}} +{"id":"ro_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"ro"}} +{"id":"ru_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ru"}} +{"id":"sv_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"sv"}} +{"id":"ta_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"ta"}} +{"id":"te_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"te"}} +{"id":"th_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"th"}} +{"id":"tr_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"tr"}} +{"id":"zh_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zh"}} diff --git a/example/wiki_index_mapping.json b/examples/multiple_type_example_mapping.json similarity index 99% rename from example/wiki_index_mapping.json rename to examples/multiple_type_example_mapping.json index ac7c43b..36b6522 100644 --- a/example/wiki_index_mapping.json +++ b/examples/multiple_type_example_mapping.json @@ -1,6 +1,6 @@ { "types": { - "arwiki": { + "ar": { "enabled": true, "dynamic": true, "properties": { @@ -80,7 +80,7 @@ }, "default_analyzer": "ar" }, - "bgwiki": { + "bg": { "enabled": true, "dynamic": true, "properties": { @@ -160,7 +160,7 @@ }, "default_analyzer": "bg" }, - "cawiki": { + "ca": { "enabled": true, "dynamic": true, "properties": { @@ -240,7 +240,7 @@ }, "default_analyzer": "ca" }, - "cswiki": { + "cs": { "enabled": true, "dynamic": true, "properties": { @@ -320,7 +320,7 @@ }, "default_analyzer": "cs" }, - "dawiki": { + "da": { "enabled": true, "dynamic": true, "properties": { @@ -400,7 +400,7 @@ }, "default_analyzer": "da" }, - "dewiki": { + "de": { "enabled": true, "dynamic": true, "properties": { @@ -480,7 +480,7 @@ }, "default_analyzer": "de" }, - "elwiki": { + "el": { "enabled": true, "dynamic": true, "properties": { @@ -560,7 +560,7 @@ }, "default_analyzer": "el" }, - "enwiki": { + "en": { "enabled": true, "dynamic": true, "properties": { @@ -640,7 +640,7 @@ }, "default_analyzer": "en" }, - "eswiki": { + "es": { "enabled": true, "dynamic": true, "properties": { @@ -720,7 +720,7 @@ }, "default_analyzer": "es" }, - "fawiki": { + "fa": { "enabled": true, "dynamic": true, "properties": { @@ -800,7 +800,7 @@ }, "default_analyzer": "fa" }, - "fiwiki": { 
+ "fi": { "enabled": true, "dynamic": true, "properties": { @@ -880,7 +880,7 @@ }, "default_analyzer": "fi" }, - "frwiki": { + "fr": { "enabled": true, "dynamic": true, "properties": { @@ -960,7 +960,7 @@ }, "default_analyzer": "fr" }, - "gawiki": { + "ga": { "enabled": true, "dynamic": true, "properties": { @@ -1040,7 +1040,7 @@ }, "default_analyzer": "ga" }, - "glwiki": { + "gl": { "enabled": true, "dynamic": true, "properties": { @@ -1120,7 +1120,7 @@ }, "default_analyzer": "gl" }, - "guwiki": { + "gu": { "enabled": true, "dynamic": true, "properties": { @@ -1200,7 +1200,7 @@ }, "default_analyzer": "in" }, - "hiwiki": { + "hi": { "enabled": true, "dynamic": true, "properties": { @@ -1280,7 +1280,7 @@ }, "default_analyzer": "hi" }, - "huwiki": { + "hu": { "enabled": true, "dynamic": true, "properties": { @@ -1360,7 +1360,7 @@ }, "default_analyzer": "hu" }, - "hywiki": { + "hy": { "enabled": true, "dynamic": true, "properties": { @@ -1440,7 +1440,7 @@ }, "default_analyzer": "hy" }, - "idwiki": { + "id": { "enabled": true, "dynamic": true, "properties": { @@ -1520,7 +1520,7 @@ }, "default_analyzer": "id" }, - "itwiki": { + "it": { "enabled": true, "dynamic": true, "properties": { @@ -1600,7 +1600,7 @@ }, "default_analyzer": "it" }, - "jawiki": { + "ja": { "enabled": true, "dynamic": true, "properties": { @@ -1680,7 +1680,7 @@ }, "default_analyzer": "ja" }, - "knwiki": { + "kn": { "enabled": true, "dynamic": true, "properties": { @@ -1760,7 +1760,7 @@ }, "default_analyzer": "in" }, - "kowiki": { + "ko": { "enabled": true, "dynamic": true, "properties": { @@ -1840,7 +1840,7 @@ }, "default_analyzer": "cjk" }, - "mlwiki": { + "ml": { "enabled": true, "dynamic": true, "properties": { @@ -1920,7 +1920,7 @@ }, "default_analyzer": "in" }, - "nlwiki": { + "nl": { "enabled": true, "dynamic": true, "properties": { @@ -2000,7 +2000,7 @@ }, "default_analyzer": "nl" }, - "nowiki": { + "no": { "enabled": true, "dynamic": true, "properties": { @@ -2080,7 +2080,7 @@ }, 
"default_analyzer": "no" }, - "pswiki": { + "ps": { "enabled": true, "dynamic": true, "properties": { @@ -2160,7 +2160,7 @@ }, "default_analyzer": "ckb" }, - "ptwiki": { + "pt": { "enabled": true, "dynamic": true, "properties": { @@ -2240,7 +2240,7 @@ }, "default_analyzer": "pt" }, - "rowiki": { + "ro": { "enabled": true, "dynamic": true, "properties": { @@ -2320,7 +2320,7 @@ }, "default_analyzer": "ro" }, - "ruwiki": { + "ru": { "enabled": true, "dynamic": true, "properties": { @@ -2400,7 +2400,7 @@ }, "default_analyzer": "ru" }, - "svwiki": { + "sv": { "enabled": true, "dynamic": true, "properties": { @@ -2480,7 +2480,7 @@ }, "default_analyzer": "sv" }, - "tawiki": { + "ta": { "enabled": true, "dynamic": true, "properties": { @@ -2560,7 +2560,7 @@ }, "default_analyzer": "in" }, - "tewiki": { + "te": { "enabled": true, "dynamic": true, "properties": { @@ -2640,7 +2640,7 @@ }, "default_analyzer": "in" }, - "thwiki": { + "th": { "enabled": true, "dynamic": true, "properties": { @@ -2720,7 +2720,7 @@ }, "default_analyzer": "th" }, - "trwiki": { + "tr": { "enabled": true, "dynamic": true, "properties": { @@ -2800,7 +2800,7 @@ }, "default_analyzer": "tr" }, - "zhwiki": { + "zh": { "enabled": true, "dynamic": true, "properties": { diff --git a/go.mod b/go.mod index e9874b1..cc8f834 100644 --- a/go.mod +++ b/go.mod @@ -1,54 +1,29 @@ module github.com/mosuka/blast -go 1.12 +go 1.14 require ( - github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89 // indirect - github.com/blevesearch/bleve v0.7.0 - github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect - github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect - github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect - github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 // indirect - github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 // indirect - github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect - 
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect - github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect - github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec // indirect - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect - github.com/golang/protobuf v1.3.1 - github.com/gorilla/mux v1.7.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 + github.com/bbva/raft-badger v1.0.1 + github.com/blevesearch/bleve/v2 v2.0.0 + github.com/blevesearch/bleve_index_api v1.0.0 + github.com/dgraph-io/badger/v2 v2.0.3 + github.com/gogo/protobuf v1.3.0 // indirect + github.com/golang/protobuf v1.4.2 + github.com/gorilla/handlers v1.4.2 + github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/golang-lru v0.5.1 // indirect - github.com/hashicorp/raft v1.1.0 - github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa - github.com/ikawaha/kagome.ipadic v1.0.1 // indirect - github.com/imdario/mergo v0.3.7 - github.com/jmhodges/levigo v1.0.0 // indirect - github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 - github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 - github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 + github.com/grpc-ecosystem/grpc-gateway v1.14.6 + github.com/hashicorp/raft v1.1.2 + github.com/mash/go-accesslog v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/prometheus/client_golang v0.9.2 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect - github.com/prometheus/common v0.2.0 // indirect - github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // 
indirect - github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect - github.com/stretchr/objx v0.1.1 - github.com/stretchr/testify v1.3.0 - github.com/syndtr/goleveldb v1.0.0 // indirect - github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect - github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect - github.com/urfave/cli v1.20.0 - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.10.0 - golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect - google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect - google.golang.org/grpc v1.19.1 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/common v0.9.1 + github.com/spf13/cobra v0.0.7 + github.com/spf13/viper v1.4.0 + go.uber.org/zap v1.15.0 + google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 + google.golang.org/grpc v1.29.1 + google.golang.org/protobuf v1.23.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/yaml.v2 v2.2.2 ) diff --git a/go.sum b/go.sum index a38a8a7..8dbb0d6 100644 --- a/go.sum +++ b/go.sum @@ -1,155 +1,232 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= -github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1 h1:G/NOANWMQev0CftoyxQwtRakdyNNNMB3qxkt/tj1HGs= -github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1/go.mod h1:o80NPAib/LOl8Eysqppjj7kkGkqz++eqzYGlvROpDcQ= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= +github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics 
v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89 h1:A1SPjPcl2LdF2Skv9Zt41jWu4XYQAyvBDzrveQjlkhQ= -github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89/go.mod h1:wSblbytRgcqD+U+gGCKz5145DyjUYPh5fqh2uyXxfZw= +github.com/bbva/raft-badger v1.0.1 h1:CytsAQ3KbyX/I73Sp+shryUUVL7eElWpfsNmV/6vDcM= +github.com/bbva/raft-badger v1.0.1/go.mod h1:g7ufi3iTshR7TjNy5GyTPGzNS2/gKl2wK27d5QYRsZw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/blevesearch/bleve v0.7.0 h1:znyZ3zjsh2Scr60vszs7rbF29TU6i1q9bfnZf1vh0Ac= -github.com/blevesearch/bleve v0.7.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= -github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA= -github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 h1:ZPImXwzC+ICkkSYlPP9mMVgQlZH24+56rIEUjVxfFnY= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= -github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I= -github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA= -github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg= -github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8= -github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d 
h1:iPCfLXcTYDotqO1atEOQyoRDwlGaZVuMI4wSaKQlI2I= -github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blevesearch/bleve/v2 v2.0.0 h1:ybdeQ1ZjQcaUKxRsduYqCDzBmveXYbCQUCpG+jHxcG8= +github.com/blevesearch/bleve/v2 v2.0.0/go.mod h1:OBP2Pktqik8vEiUlGhuWjYx7KiO4zD542+DHqICwM5w= +github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= +github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= +github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= +github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= +github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= +github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= +github.com/blevesearch/scorch_segment_api v1.0.0 h1:BUkCPWDg2gimTEyVDXf85I2buqqt4lh28uaVMiJsIYk= +github.com/blevesearch/scorch_segment_api v1.0.0/go.mod h1:KgRYmlfYC27NeM6cXOHx8LBgq7jn0atpV8mVWoBKBng= +github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= +github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= +github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= +github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= +github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU= +github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q= 
+github.com/blevesearch/zapx/v11 v11.1.10 h1:8Eo3rXiHsVSP9Sk+4StrrwLrj9vyulhMVPmxTf8ZuDg= +github.com/blevesearch/zapx/v11 v11.1.10/go.mod h1:DTjbcBqrr/Uo82UBilDC8lEew42gN/OcIyiTNFtSijc= +github.com/blevesearch/zapx/v12 v12.1.10 h1:sqR+/0Z4dSTovApRqLA1HnilMtQer7a4UvPrNmPzlTM= +github.com/blevesearch/zapx/v12 v12.1.10/go.mod h1:14NmKnPrnKAIyiEJM566k/Jk+FQpuiflT5d3uaaK3MI= +github.com/blevesearch/zapx/v13 v13.1.10 h1:zCneEVRJDXwtDfSwh+33Dxguliv192vCK283zdGH4Sw= +github.com/blevesearch/zapx/v13 v13.1.10/go.mod h1:YsVY6YGpTEAlJOMjdL7EsdBLvjWd8kPa2gwJDNpqLJo= +github.com/blevesearch/zapx/v14 v14.1.10 h1:nD0vw2jxKogJFfA5WyoS4wNwZlVby3Aq8aW7CZi6YIw= +github.com/blevesearch/zapx/v14 v14.1.10/go.mod h1:hsULl5eJSxs5NEfBsmeT9qrqdCP+/ecpVZKt60M4V64= +github.com/blevesearch/zapx/v15 v15.1.10 h1:kZR3b9jO9l6s2B5UHI+1N1llLzJ4nYikkXQTMrDl1vQ= +github.com/blevesearch/zapx/v15 v15.1.10/go.mod h1:4ypq25bwtSQKzwEF1UERyIhmGTbMT3brY/n4NC5gRnM= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 h1:T7Qykid5GIoDEVTZL0NcbimcT2qmzjo5mNGhe8i0/5M= 
-github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpmYDxGobTY/jTuF5zHLw0ID75yzuM= -github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= -github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= -github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= +github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= +github.com/couchbase/moss v0.1.0 h1:HCL+xxHUwmOaL44kMM/gU08OW6QGCui1WVFO58bjhNI= +github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= +github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw= +github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.5.4 h1:gVTrpUTbbr/T24uvoCaqY2KSHfNLVGm0w+hbee2HMeg= -github.com/dgraph-io/badger v1.5.4/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec h1:sElGDs3V8VdCxH5tWi0ycWJzteOPLJ3HtItSSKI95PY= -github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 
h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI= +github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf 
v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y= -github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= -github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 h1:bLsrEmB2NUwkHH18FOJBIa04wOV2RQalJrcafTYu6Lg= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477/go.mod h1:aUF6HQr8+t3FC/ZHAC+pZreUBhTaxumuu3L+d37uRxk= -github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa h1:ccwcWyXHTaonH6yzx+t/3p9aNm/ogSTfd6YobZOtHmE= -github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa/go.mod h1:ooP3NrrH0GG/sVjF9pbRvhF6nVHRR4mkkwscLqReN1o= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= +github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ikawaha/kagome.ipadic v1.0.1 
h1:4c/tx3Rga6LvtTouEdvodcfeWWTttATZg8XIH8lRHG4= -github.com/ikawaha/kagome.ipadic v1.0.1/go.mod h1:Nh0/WFhzTQYw9XlsOxAuhdSZ1/xfi7vn5pjqb6FBwJE= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= -github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/markthethomas/raft-badger 
v0.0.0-20190420151455-b37d14e77a69 h1:/ylv98AIMI8XzkeqJGmJSTc/zRQrNllmYWW5b2MoyD4= -github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69/go.mod h1:H6ZQv8h8j98nwnF25XLGalSOLhFRjFQ2GGNZRNkkw8Y= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217/go.mod h1:5JLTyA+23fYz/BfD5Hn736mGEZopzWtEx1pdNfnTp8k= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mash/go-accesslog v1.1.0 h1:y22583qP3s+SePBs6mv8ZTz5D1UffPrSg+WFEW2Rf/c= +github.com/mash/go-accesslog v1.1.0/go.mod h1:DAbGQzio0KX16krP/3uouoTPxGbzcPjFAb948zazOgg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 h1:LE+XIZDiXr587to+tCWKYPTrtQOmJzOxzcwhiDQIJbE= -github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68/go.mod h1:qy5KaSXSrNqdWFS/e3wWNFXZPRDnqjX79iRhOveUpfc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 
h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -160,103 +237,214 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model 
v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 h1:gAuD3LIrjkoOOPLlhGlZWZXztrQII9a9kT6HS5jFtSY= -github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ= -github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= -github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod 
h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= -github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= -github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0 
h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= +github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 h1:ma5vyZGiQ7pJ6oAlz39EFxVv6uQmfD4XXgdf528hsUI= -github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19/go.mod h1:2/ITuYMfAxT7SEIngRdPtyFD4rfTsutLuRfmt6sWio8= -github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 h1:HOxvxvnntLiPn123Fk+twfUhCQdMDaqmb0cclArW0T0= -github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod 
h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap 
v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc 
h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 h1:XCbwcsP09zrBt1aYht0fASw+ynbEpYr8NnCkIN9nMM0= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65 h1:hOY+O8MxdkPV10pNf7/XEHaySCiPKxixMKUshfHsGn0= -golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d h1:XB2jc5XQ9uhizGTS2vWcN01bc4dI6z3C4KY5MQm8SS8= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/grpc/client.go b/grpc/client.go deleted file mode 100644 index cadfd2f..0000000 --- a/grpc/client.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - "context" - "errors" - "math" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Client struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client protobuf.BlastClient -} - -func NewContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewClient(address string) (*Client, error) { - ctx, cancel := NewContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - 
grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) - if err != nil { - return nil, err - } - - return &Client{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: protobuf.NewBlastClient(conn), - }, nil -} - -func (c *Client) Cancel() { - c.cancel() -} - -func (c *Client) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *Client) GetAddress() string { - return c.conn.Target() -} - -func (c *Client) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &protobuf.GetNodeRequest{ - Id: id, - } - - resp, err := c.client.GetNode(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - ins, err := protobuf.MarshalAny(resp.NodeConfig) - nodeConfig := *ins.(*map[string]interface{}) - - node := map[string]interface{}{ - "node_config": nodeConfig, - "state": resp.State, - } - - return node, nil -} - -func (c *Client) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { - nodeConfigAny := &any.Any{} - err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) - if err != nil { - return err - } - - req := &protobuf.SetNodeRequest{ - Id: id, - NodeConfig: nodeConfigAny, - } - - _, err = c.client.SetNode(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil -} - -func (c *Client) DeleteNode(id string, opts ...grpc.CallOption) error { - req := &protobuf.DeleteNodeRequest{ - Id: id, - } - - _, err := c.client.DeleteNode(c.ctx, req, opts...) 
- if err != nil { - return err - } - - return nil -} - -func (c *Client) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - ins, err := protobuf.MarshalAny(resp.Cluster) - cluster := *ins.(*map[string]interface{}) - - return cluster, nil -} - -func (c *Client) WatchCluster(opts ...grpc.CallOption) (protobuf.Blast_WatchClusterClient, error) { - req := &empty.Empty{} - - watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil -} - -func (c *Client) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return errors.New(st.Message()) - } - - return nil -} - -func (c *Client) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return protobuf.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil -} - -func (c *Client) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return protobuf.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil -} - -func (c *Client) GetValue(key string, opts ...grpc.CallOption) (interface{}, error) { - req := &protobuf.GetValueRequest{ - Key: key, - } - - resp, err := c.client.GetValue(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - value, err := protobuf.MarshalAny(resp.Value) - - return value, nil -} - -func (c *Client) SetValue(key string, value interface{}, opts ...grpc.CallOption) error { - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &protobuf.SetValueRequest{ - Key: key, - Value: valueAny, - } - - _, err = c.client.SetValue(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil -} - -func (c *Client) DeleteValue(key string, opts ...grpc.CallOption) error { - req := &protobuf.DeleteValueRequest{ - Key: key, - } - - _, err := c.client.DeleteValue(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil -} - -func (c *Client) WatchStore(key string, opts ...grpc.CallOption) (protobuf.Blast_WatchStoreClient, error) { - req := &protobuf.WatchStoreRequest{ - Key: key, - } - - watchClient, err := c.client.WatchStore(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil -} - -func (c *Client) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &protobuf.GetDocumentRequest{ - Id: id, - } - - resp, err := c.client.GetDocument(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - ins, err := protobuf.MarshalAny(resp.Fields) - fields := *ins.(*map[string]interface{}) - - return fields, nil -} - -func (c *Client) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { - // bleve.SearchRequest -> Any - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return nil, err - } - - req := &protobuf.SearchRequest{ - SearchRequest: searchRequestAny, - } - - resp, err := c.client.Search(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - // Any -> bleve.SearchResult - searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - if searchResultInstance == nil { - return nil, errors.New("nil") - } - searchResult := searchResultInstance.(*bleve.SearchResult) - - return searchResult, nil -} - -func (c *Client) IndexDocument(docs []map[string]interface{}, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.IndexDocument(c.ctx, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, doc := range docs { - id := doc["id"].(string) - fields := doc["fields"].(map[string]interface{}) - - fieldsAny := &any.Any{} - err := protobuf.UnmarshalAny(&fields, fieldsAny) - if err != nil { - return -1, err - } - - req := &protobuf.IndexDocumentRequest{ - Id: id, - Fields: fieldsAny, - } - - err = stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil -} - -func (c *Client) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.DeleteDocument(c.ctx, opts...) - if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, id := range ids { - req := &protobuf.DeleteDocumentRequest{ - Id: id, - } - - err := stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil -} - -func (c *Client) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - indexConfigIntr, err := protobuf.MarshalAny(resp.IndexConfig) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - indexConfig := *indexConfigIntr.(*map[string]interface{}) - - return indexConfig, nil -} - -func (c *Client) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - indexStatsIntr, err := protobuf.MarshalAny(resp.IndexStats) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - indexStats := *indexStatsIntr.(*map[string]interface{}) - - return indexStats, nil -} diff --git a/grpc/server.go b/grpc/server.go deleted file mode 100644 index 9360656..0000000 --- a/grpc/server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpc - -import ( - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type Server struct { - service protobuf.BlastServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger) (*Server, error) { - server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - //grpc_recovery.UnaryServerInterceptor(), - )), - ) - - protobuf.RegisterBlastServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &Server{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *Server) Start() error { - s.logger.Info("start server") - err := 
s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *Server) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} diff --git a/grpc/service.go b/grpc/service.go deleted file mode 100644 index 5882e52..0000000 --- a/grpc/service.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Service struct{} - -func (s *Service) Start() error { - return nil -} - -func (s *Service) Stop() error { - return nil -} - -func (s *Service) LivenessProbe(ctx context.Context, req *empty.Empty) (*protobuf.LivenessProbeResponse, error) { - resp := &protobuf.LivenessProbeResponse{ - State: protobuf.LivenessProbeResponse_ALIVE, - } - - return resp, nil -} - -func (s *Service) ReadinessProbe(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessProbeResponse, error) { - resp := &protobuf.ReadinessProbeResponse{ - State: protobuf.ReadinessProbeResponse_READY, - } - - return resp, nil -} - -func (s *Service) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - return &protobuf.GetNodeResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) SetNode(ctx 
context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - return &protobuf.GetClusterResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetValue(ctx context.Context, req *protobuf.GetValueRequest) (*protobuf.GetValueResponse, error) { - return &protobuf.GetValueResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) SetValue(ctx context.Context, req *protobuf.SetValueRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) DeleteValue(ctx context.Context, req *protobuf.DeleteValueRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) WatchStore(req *protobuf.WatchStoreRequest, server protobuf.Blast_WatchStoreServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - return &protobuf.GetDocumentResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - return 
&protobuf.SearchResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetIndexConfig(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexConfigResponse, error) { - return &protobuf.GetIndexConfigResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetIndexStats(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexStatsResponse, error) { - return &protobuf.GetIndexStatsResponse{}, status.Error(codes.Unavailable, "not implement") -} diff --git a/http/metric.go b/http/metric.go deleted file mode 100644 index 09afbf5..0000000 --- a/http/metric.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package http - -import ( - "net/http" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "http" - subsystem = "server" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handling_seconds", - Help: "The invocation duration in seconds.", - }, - []string{ - "request_uri", - }, - ) - - RequestsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handled_total", - Help: "The number of requests.", - }, - []string{ - "request_uri", - "http_method", - "http_status", - }, - ) - - RequestsBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_received_bytes", - Help: "A summary of the invocation requests bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) - - ResponsesBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "responses_sent_bytes", - Help: "A summary of the invocation responses bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(RequestsTotal) - prometheus.MustRegister(RequestsBytesTotal) - prometheus.MustRegister(ResponsesBytesTotal) -} - -func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request) { - DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) - - RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method, "http_status": strconv.Itoa(status)}).Inc() - - RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(float64(request.ContentLength)) - - contentLength, err := 
strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) - if err == nil { - ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(contentLength) - } -} diff --git a/http/response.go b/http/response.go deleted file mode 100644 index d51fdc2..0000000 --- a/http/response.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package http - -import ( - "encoding/json" - "net/http" - "strconv" - - "go.uber.org/zap" -) - -func NewJSONMessage(msgMap map[string]interface{}) ([]byte, error) { - content, err := json.MarshalIndent(msgMap, "", " ") - if err != nil { - return nil, err - } - - return content, nil -} - -func WriteResponse(w http.ResponseWriter, content []byte, status int, logger *zap.Logger) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", strconv.FormatInt(int64(len(content)), 10)) - w.WriteHeader(status) - _, err := w.Write(content) - if err != nil { - logger.Error(err.Error()) - } - - return -} diff --git a/http/router.go b/http/router.go deleted file mode 100644 index 40a9a92..0000000 --- a/http/router.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package http - -import ( - "github.com/gorilla/mux" - "github.com/mosuka/blast/grpc" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - GRPCClient *grpc.Client - logger *zap.Logger -} - -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := grpc.NewClient(grpcAddr) - if err != nil { - return nil, err - } - - router := &Router{ - GRPCClient: grpcClient, - logger: logger, - } - - return router, nil -} - -func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - - return nil -} diff --git a/http/server.go b/http/server.go deleted file mode 100644 index 8cdb7cf..0000000 --- a/http/server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package http - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type Server struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &Server{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *Server) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go deleted file mode 100644 index 8cea720..0000000 --- a/indexer/grpc_service.go +++ /dev/null @@ -1,990 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "context" - "errors" - "fmt" - "io" - "reflect" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/protobuf" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - *grpc.Service - - clusterConfig *config.ClusterConfig - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers map[string]interface{} - peerClients map[string]*grpc.Client - cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} - clusterMutex sync.RWMutex - - managers map[string]interface{} - managerClients map[string]*grpc.Client - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} -} - -func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - clusterConfig: clusterConfig, - raftServer: raftServer, - logger: logger, - - peers: make(map[string]interface{}, 0), - peerClients: make(map[string]*grpc.Client, 0), - cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan protobuf.GetClusterResponse]struct{}), - - managers: make(map[string]interface{}, 0), - managerClients: make(map[string]*grpc.Client, 0), - }, nil -} - -func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - if s.clusterConfig.ManagerAddr != "" { - s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - } - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - 
s.stopUpdateCluster() - - if s.clusterConfig.ManagerAddr != "" { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - } - - return nil -} - -func (s *GRPCService) getManagerClient() (*grpc.Client, error) { - var client *grpc.Client - - for id, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("id", id)) - continue - } - - state, ok := nm["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() || state == raft.Follower.String() { - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { - client, err := grpc.NewClient(managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - managers, err := client.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return managers, nil -} - -func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.clusterConfig.ManagerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for 
nodeId, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("id", nodeId)) - continue - } - - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - stream, err := client.WatchCluster() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // get current manager cluster - managersIntr, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if managersIntr == nil { - s.logger.Error(err.Error()) - continue - } - managers := *managersIntr.(*map[string]interface{}) - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, nodeConfig := range managers { - mm, ok := nodeConfig.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - grpcAddr, ok := 
mm["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.managerClients, nodeId) - - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } - } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - err = 
client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) - } - } - - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) - } - } - } -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { - var client *grpc.Client - - for id, node := range s.cluster { - state, ok := node.(map[string]interface{})["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() { - client, ok = s.peerClients[id] - if ok { - break - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("not a leader", zap.String("id", id)) - } - } - - if client == nil { - err := errors.New("there is no leader") - s.logger.Error(err.Error()) - return nil, err - } - - return client, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - for 
{ - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - peers := make(map[string]interface{}, 0) - for nodeId, node := range cluster { - if nodeId != s.NodeID() { - peers[nodeId] = node - } - } - - if !reflect.DeepEqual(s.peers, peers) { - // open clients - for nodeId, nodeInfo := range peers { - nodeConfig, ok := nodeInfo.(map[string]interface{})["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId), zap.Any("node_info", nodeInfo)) - continue - } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.peerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.peerClients, nodeId) - - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.peerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer 
list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if peerClient != nil { - s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) - s.peerClients[nodeId] = peerClient - } - } - } - - // close nonexistent clients - for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.peerClients, nodeId) - } - } - - // keep current peer nodes - s.logger.Debug("current peers", zap.Any("peers", peers)) - s.peers = peers - } - - // notify current cluster - if !reflect.DeepEqual(s.cluster, cluster) { - // convert to GetClusterResponse for channel output - clusterResp := &protobuf.GetClusterResponse{} - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Warn(err.Error()) - } - clusterResp.Cluster = clusterAny - - // output to channel - for c := range s.clusterChans { - c <- *clusterResp - } - - // notify cluster config to manager - if s.clusterConfig.ManagerAddr != "" && s.raftServer.IsLeader() { - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - err = client.SetValue(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId), cluster) - if err != nil { - s.logger.Error(err.Error()) 
- } - } - - // keep current cluster - s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - s.cluster = cluster - } - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - return map[string]interface{}{ - "node_config": s.raftServer.nodeConfig.ToMap(), - "state": s.raftServer.State().String(), - }, nil -} - -func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error - - if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = peerClient.GetNode(id) - if err != nil { - s.logger.Warn(err.Error()) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - } else { - s.logger.Warn("node does not exist in peer list", zap.String("id", id)) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - - return nodeInfo, nil -} - -func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error - - if id == "" || id == s.NodeID() { - nodeInfo, err = s.getSelfNode() - } else { - nodeInfo, err = s.getPeerNode(id) - } - - if err != nil { - 
s.logger.Error(err.Error()) - return nil, err - } - - return nodeInfo, nil -} - -func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - resp := &protobuf.GetNodeResponse{} - - nodeInfo, err := s.getNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfigAny := &any.Any{} - if nodeConfig, exist := nodeInfo["node_config"]; exist { - err = protobuf.UnmarshalAny(nodeConfig.(map[string]interface{}), nodeConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - s.logger.Error("missing node_config", zap.Any("node_config", nodeConfig)) - } - - state, exist := nodeInfo["state"].(string) - if !exist { - s.logger.Error("missing node state", zap.String("state", state)) - state = raft.Shutdown.String() - } - - resp.NodeConfig = nodeConfigAny - resp.State = state - - return resp, nil -} - -func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = client.SetNode(id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - ins, err := protobuf.MarshalAny(req.NodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfig := *ins.(*map[string]interface{}) - - err = s.setNode(req.Id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return 
resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = client.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (map[string]interface{}, error) { - cluster, err := s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update node state - for nodeId := range cluster { - node, err := s.getNode(nodeId) - if err != nil { - s.logger.Error(err.Error()) - } - state := node["state"].(string) - - if _, ok := cluster[nodeId]; !ok { - cluster[nodeId] = map[string]interface{}{} - } - nodeInfo := cluster[nodeId].(map[string]interface{}) - nodeInfo["state"] = state - } - - return cluster, nil -} - -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - resp := &protobuf.GetClusterResponse{} - - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = clusterAny - - return resp, nil -} - -func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - chans := make(chan 
protobuf.GetClusterResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - resp := &protobuf.GetDocumentResponse{} - - fields, err := s.raftServer.GetDocument(req.Id) - if err != nil { - s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fields, fieldsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Fields = fieldsAny - - return resp, nil -} - -func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - resp := &protobuf.SearchResponse{} - - searchRequest, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - searchResultAny := &any.Any{} - err = 
protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.SearchResult = searchResultAny - - return resp, nil -} - -func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { - docs := make([]map[string]interface{}, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - // fields - ins, err := protobuf.MarshalAny(req.Fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - fields := *ins.(*map[string]interface{}) - - // document - doc := map[string]interface{}{ - "id": req.Id, - "fields": fields, - } - - docs = append(docs, doc) - } - - // index - count := -1 - var err error - if s.raftServer.IsLeader() { - count, err = s.raftServer.IndexDocument(docs) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - count, err = client.IndexDocument(docs) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return stream.SendAndClose( - &protobuf.IndexDocumentResponse{ - Count: int32(count), - }, - ) -} - -func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { - ids := make([]string, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - ids = append(ids, req.Id) - } - - // delete - count := -1 - var err error - if s.raftServer.IsLeader() { - count, err = 
s.raftServer.DeleteDocument(ids) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - count, err = client.DeleteDocument(ids) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return stream.SendAndClose( - &protobuf.DeleteDocumentResponse{ - Count: int32(count), - }, - ) -} - -func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexConfigResponse, error) { - resp := &protobuf.GetIndexConfigResponse{} - - indexConfig, err := s.raftServer.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - indexConfigAny := &any.Any{} - err = protobuf.UnmarshalAny(indexConfig, indexConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.IndexConfig = indexConfigAny - - return resp, nil -} - -func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexStatsResponse, error) { - resp := &protobuf.GetIndexStatsResponse{} - - indexStats, err := s.raftServer.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - indexStatsAny := &any.Any{} - err = protobuf.UnmarshalAny(indexStats, indexStatsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.IndexStats = indexStatsAny - - return resp, nil -} diff --git a/indexer/http_handler.go b/indexer/http_handler.go deleted file mode 100644 index 984143d..0000000 --- a/indexer/http_handler.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 
2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/blevesearch/bleve" - "github.com/gorilla/mux" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) - if err != nil { - return nil, err - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} 
- -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type GetHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - id := vars["id"] - - fields, err := h.client.GetDocument(id) - if err != nil { - switch err { - case errors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // map[string]interface{} -> bytes - content, err = json.MarshalIndent(fields, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewSetDocumentHandler(client 
*grpc.Client, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]map[string]interface{}, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - // Indexing documents in bulk - err := json.Unmarshal(bodyBytes, &docs) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } else { - // Indexing a document - var fields map[string]interface{} - err := json.Unmarshal(bodyBytes, &fields) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = 
blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - // Deleting documents in bulk - err := json.Unmarshal(bodyBytes, &ids) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } 
else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type SearchHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - 
status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/indexer/index.go b/indexer/index.go deleted file mode 100644 index 82e2ba3..0000000 --- a/indexer/index.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "os" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/document" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "go.uber.org/zap" -) - -type Index struct { - indexConfig *config.IndexConfig - logger *zap.Logger - - index bleve.Index -} - -func NewIndex(dir string, indexConfig *config.IndexConfig, logger *zap.Logger) (*Index, error) { - //bleve.SetLog(logger) - - var index bleve.Index - _, err := os.Stat(dir) - if os.IsNotExist(err) { - // create new index - index, err = bleve.NewUsing(dir, indexConfig.IndexMapping, indexConfig.IndexType, indexConfig.IndexStorageType, nil) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } else { - // open existing index - index, err = bleve.OpenUsing(dir, map[string]interface{}{ - "create_if_missing": false, - "error_if_exists": false, - }) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } - - return &Index{ - index: index, - indexConfig: indexConfig, - logger: logger, - }, nil -} - -func (i *Index) Close() error { - err := i.index.Close() - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) Get(id string) (map[string]interface{}, error) { - doc, err := i.index.Document(id) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - if doc == nil { - return nil, errors.ErrNotFound - } - - fields := make(map[string]interface{}, 0) - for _, f := range doc.Fields { - var v interface{} - switch field := f.(type) { - case *document.TextField: - v = string(field.Value()) - case *document.NumericField: - n, err := field.Number() - if err == nil { - v = n - } - case *document.DateTimeField: - d, err := field.DateTime() - if err == nil { - v = d.Format(time.RFC3339Nano) - } - } - existing, existed := fields[f.Name()] - if existed { - switch existing := 
existing.(type) { - case []interface{}: - fields[f.Name()] = append(existing, v) - case interface{}: - arr := make([]interface{}, 2) - arr[0] = existing - arr[1] = v - fields[f.Name()] = arr - } - } else { - fields[f.Name()] = v - } - } - - return fields, nil -} - -func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := i.index.Search(request) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (i *Index) Index(id string, fields map[string]interface{}) error { - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - _, err := i.BulkIndex([]map[string]interface{}{doc}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) BulkIndex(docs []map[string]interface{}) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, doc := range docs { - id, ok := doc["id"].(string) - if !ok { - i.logger.Error("missing document id") - continue - } - fields, ok := doc["fields"].(map[string]interface{}) - if !ok { - i.logger.Error("missing document fields") - continue - } - err := batch.Index(id, fields) - if err != nil { - i.logger.Error(err.Error()) - continue - } - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Delete(id string) error { - _, err := i.BulkDelete([]string{id}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) BulkDelete(ids []string) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, id := range ids { - batch.Delete(id) - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Config() (map[string]interface{}, error) { - return i.indexConfig.ToMap(), nil -} - -func (i *Index) Stats() (map[string]interface{}, error) { 
- return i.index.StatsMap(), nil -} - -func (i *Index) SnapshotItems() <-chan *protobuf.Document { - ch := make(chan *protobuf.Document, 1024) - - go func() { - idx, _, err := i.index.Advanced() - if err != nil { - i.logger.Error(err.Error()) - return - } - - r, err := idx.Reader() - if err != nil { - i.logger.Error(err.Error()) - return - } - - docCount := 0 - - dr, err := r.DocIDReaderAll() - for { - if dr == nil { - i.logger.Error(err.Error()) - break - } - id, err := dr.Next() - if id == nil { - i.logger.Debug("finished to read all document ids") - break - } else if err != nil { - i.logger.Warn(err.Error()) - continue - } - - // get original document - fieldsBytes, err := i.index.GetInternal(id) - - // bytes -> map[string]interface{} - var fieldsMap map[string]interface{} - err = json.Unmarshal([]byte(fieldsBytes), &fieldsMap) - if err != nil { - i.logger.Error(err.Error()) - break - } - - // map[string]interface{} -> Any - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - i.logger.Error(err.Error()) - break - } - - doc := &protobuf.Document{ - Id: string(id), - Fields: fieldsAny, - } - - ch <- doc - - docCount = docCount + 1 - } - - i.logger.Debug("finished to write all documents to channel") - ch <- nil - - i.logger.Info("finished to snapshot", zap.Int("count", docCount)) - - return - }() - - return ch -} diff --git a/indexer/raft_command.go b/indexer/raft_command.go deleted file mode 100644 index 3cab8f0..0000000 --- a/indexer/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - indexDocument - deleteDocument -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go deleted file mode 100644 index 381cdc5..0000000 --- a/indexer/raft_fsm.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" - "github.com/mosuka/blast/protobuf" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - indexConfig *config.IndexConfig - logger *zap.Logger - - metadata maputils.Map - metadataMutex sync.RWMutex - - index *Index -} - -func NewRaftFSM(path string, indexConfig *config.IndexConfig, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - indexConfig: indexConfig, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - var err error - - f.metadata = maputils.Map{} - - f.index, err = NewIndex(f.path, f.indexConfig, f.logger) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) Stop() error { - err := f.index.Close() - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) GetNodeConfig(nodeId string) (map[string]interface{}, error) { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() - - nodeConfig, err := f.metadata.Get(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - if err == maputils.ErrNotFound { - return nil, blasterrors.ErrNotFound - } - return nil, err - } - - return nodeConfig.(maputils.Map).ToMap(), nil -} - -func (f *RaftFSM) SetNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() - - err := f.metadata.Merge(nodeId, nodeConfig) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - return err - } - - return nil -} - -func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { - f.metadataMutex.RLock() - defer 
f.metadataMutex.RUnlock() - - err := f.metadata.Delete(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - return err - } - - return nil -} - -func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { - fields, err := f.index.Get(id) - if err != nil { - f.logger.Error(err.Error()) - return nil, err - } - - return fields, nil -} - -func (f *RaftFSM) IndexDocument(id string, fields map[string]interface{}) error { - err := f.index.Index(id, fields) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) IndexDocuments(docs []map[string]interface{}) (int, error) { - count, err := f.index.BulkIndex(docs) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) DeleteDocument(id string) error { - err := f.index.Delete(id) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) DeleteDocuments(ids []string) (int, error) { - count, err := f.index.BulkDelete(ids) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := f.index.Search(request) - if err != nil { - f.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (f *RaftFSM) GetIndexConfig() (map[string]interface{}, error) { - return f.index.Config() -} - -func (f *RaftFSM) GetIndexStats() (map[string]interface{}, error) { - return f.index.Stats() -} - -type fsmResponse struct { - error error -} - -type fsmIndexDocumentResponse struct { - count int - error error -} - -type fsmDeleteDocumentResponse struct { - count int - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) - if err != nil { - return err - } - - switch msg.Command { - case setNode: - var data 
map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetNodeConfig(data["node_id"].(string), data["node_config"].(map[string]interface{})) - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteNodeConfig(data["node_id"].(string)) - return &fsmResponse{error: err} - case indexDocument: - var data []map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmIndexDocumentResponse{count: -1, error: err} - } - count, err := f.IndexDocuments(data) - return &fsmIndexDocumentResponse{count: count, error: err} - case deleteDocument: - var data []string - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmDeleteDocumentResponse{count: -1, error: err} - } - count, err := f.DeleteDocuments(data) - return &fsmDeleteDocumentResponse{count: count, error: err} - default: - err = errors.New("unsupported command") - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - index: f.index, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - docCount := 0 - - buff := proto.NewBuffer(data) - for { - doc := &protobuf.Document{} - err = buff.DecodeMessage(doc) - if err == io.ErrUnexpectedEOF { - break - } - if err != nil { - f.logger.Error(err.Error()) - continue - } - - fields, err := 
protobuf.MarshalAny(doc.Fields) - if err != nil { - f.logger.Error(err.Error()) - continue - } - if fields == nil { - f.logger.Error("value is nil") - continue - } - fieldsMap := *fields.(*map[string]interface{}) - - err = f.index.Index(doc.Id, fieldsMap) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("restore", zap.Int("count", docCount)) - - return nil -} - -// --------------------- - -type RaftFSMSnapshot struct { - index *Index - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - ch := f.index.SnapshotItems() - - docCount := 0 - - for { - doc := <-ch - if doc == nil { - break - } - - docBytes, err := json.Marshal(doc) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - _, err = sink.Write(docBytes) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("persist", zap.Int("count", docCount)) - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/indexer/raft_server.go b/indexer/raft_server.go deleted file mode 100644 index 78d933f..0000000 --- a/indexer/raft_server.go +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "time" - - "github.com/blevesearch/bleve" - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - raftbadgerdb "github.com/markthethomas/raft-badger" - _ "github.com/mosuka/blast/builtins" - "github.com/mosuka/blast/config" - blasterrors "github.com/mosuka/blast/errors" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger - - raft *raft.Raft - fsm *RaftFSM -} - -func NewRaftServer(nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - nodeConfig: nodeConfig, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.nodeConfig.DataDir, "index") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.indexConfig, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("node_id", s.nodeConfig.NodeId)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeConfig.NodeId) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.nodeConfig.BindAddr)) - addr, err := net.ResolveTCPAddr("tcp", s.nodeConfig.BindAddr) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.nodeConfig.BindAddr)) - transport, err := raft.NewTCPTransport(s.nodeConfig.BindAddr, 
addr, 3, 10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.nodeConfig.DataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.nodeConfig.RaftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - case "badger": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable") - s.logger.Info("create raft stable 
store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - default: - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - Address: transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own information", zap.String("node_id", s.nodeConfig.NodeId), 
zap.Any("node_config", s.nodeConfig)) - err = s.setNodeConfig(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) - if err != nil { - s.logger.Fatal(err.Error()) - return nil - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeID() string { - return s.nodeConfig.NodeId -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err 
:= s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNodeConfig(nodeId string) (map[string]interface{}, error) { - nodeConfig, err := s.fsm.GetNodeConfig(nodeId) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node_id": nodeId, - "node_config": nodeConfig, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) deleteNodeConfig(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "node_id": nodeId, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - node := make(map[string]interface{}, 0) - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - nodeConfig, err := s.getNodeConfig(id) - if err != nil { - 
s.logger.Error(err.Error()) - return nil, err - } - node["node_config"] = nodeConfig - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) - return nil - } - } - - bindAddr, ok := nodeConfig["bind_addr"].(string) - if !ok { - s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) - return errors.New("missing metadata") - } - - // add node to Raft cluster - s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.String("address", bindAddr)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(bindAddr), 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - // set node config - err = s.setNodeConfig(nodeId, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Debug("remove server", zap.String("node_id", nodeId)) - f := s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - } - - // delete node config - err = 
s.deleteNodeConfig(nodeId) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := map[string]interface{}{} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Warn(err.Error()) - node = map[string]interface{}{} - } - cluster[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { - fields, err := s.fsm.GetDocument(id) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return fields, nil -} - -func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := s.fsm.Search(request) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - msg, err := newMessage( - indexDocument, - docs, - ) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmIndexDocumentResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return f.Response().(*fsmIndexDocumentResponse).count, nil -} - 
-func (s *RaftServer) DeleteDocument(ids []string) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - msg, err := newMessage( - deleteDocument, - ids, - ) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmDeleteDocumentResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return f.Response().(*fsmDeleteDocumentResponse).count, nil -} - -func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { - indexConfig, err := s.fsm.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexConfig, nil -} - -func (s *RaftServer) GetIndexStats() (map[string]interface{}, error) { - indexStats, err := s.fsm.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexStats, nil -} diff --git a/indexer/server.go b/indexer/server.go deleted file mode 100644 index d56de34..0000000 --- a/indexer/server.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "fmt" - - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" - "go.uber.org/zap" -) - -type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server -} - -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - // get peer from manager - if s.clusterConfig.ManagerAddr != "" { - s.logger.Info("connect to manager", zap.String("manager_addr", s.clusterConfig.ManagerAddr)) - - mc, err := grpc.NewClient(s.clusterConfig.ManagerAddr) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - clusterIntr, err := mc.GetValue(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId)) - if err != nil && err != errors.ErrNotFound { - s.logger.Fatal(err.Error()) - return - } - if clusterIntr != nil { - cluster := *clusterIntr.(*map[string]interface{}) - for nodeId, nodeInfoIntr := range cluster { - if nodeId == s.nodeConfig.NodeId { - s.logger.Debug("skip own node id", zap.String("node_id", nodeId)) - continue - } - - nodeInfo := nodeInfoIntr.(map[string]interface{}) - - // get the peer node config 
- nodeConfig, ok := nodeInfo["node_config"].(map[string]interface{}) - if !ok { - s.logger.Error("missing node config", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - // get the peer node gRPC address - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Error("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - s.logger.Info("peer node detected", zap.String("peer_addr", grpcAddr)) - s.clusterConfig.PeerAddr = grpcAddr - break - } - } - } - - //get index config from manager or peer - if s.clusterConfig.ManagerAddr != "" { - mc, err := grpc.NewClient(s.clusterConfig.ManagerAddr) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - value, err := mc.GetValue("/index_config") - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - if value != nil { - s.indexConfig = config.NewIndexConfigFromMap(*value.(*map[string]interface{})) - } - } else if s.clusterConfig.PeerAddr != "" { - pc, err := grpc.NewClient(s.clusterConfig.PeerAddr) - defer func() { - s.logger.Debug("close client", zap.String("address", pc.GetAddress())) - err = pc.Close() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - value, err := pc.GetIndexConfig() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - if value != nil { - s.indexConfig = config.NewIndexConfigFromMap(value) - } - } - - // bootstrap node? 
- bootstrap := s.clusterConfig.PeerAddr == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - var err error - - // create raft server - s.raftServer, err = NewRaftServer(s.nodeConfig, s.indexConfig, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.clusterConfig, s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if !bootstrap { - client, err := grpc.NewClient(s.clusterConfig.PeerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - err = client.SetNode(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - 
-func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/indexer/server_test.go b/indexer/server_test.go deleted file mode 100644 index 307ecc3..0000000 --- a/indexer/server_test.go +++ /dev/null @@ -1,1878 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/hashicorp/raft" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer 
func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // liveness - liveness, err := client.LivenessProbe() - if err != nil { - t.Fatalf("%v", err) - } - expLiveness := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } -} - -func TestServer_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if 
err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // readiness - readiness, err := client.ReadinessProbe() - if err != nil { - t.Fatalf("%v", err) - } - expReadiness := protobuf.ReadinessProbeResponse_READY.String() - actReadiness := readiness - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - node, err := 
client.GetNode(nodeConfig.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode := map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", - } - actNode := node - if !reflect.DeepEqual(expNode, actNode) { - t.Errorf("expected content to see %v, saw %v", expNode, actNode) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - cluster, err := client.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster := map[string]interface{}{ - nodeConfig.NodeId: map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", - }, - } - actCluster := cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) - } -} 
- -func TestServer_GetIndexMapping(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexMapping := indexConfig.IndexMapping - if err != nil { - t.Fatalf("%v", err) - } - - actIndexConfigMap, err := client.GetIndexConfig() - if err != nil { - t.Fatalf("%v", err) - } - - actIndexMapping, err := indexutils.NewIndexMappingFromMap(actIndexConfigMap["index_mapping"].(map[string]interface{})) - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Errorf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) - } -} - -func TestServer_GetIndexType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := 
logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexType := indexConfig.IndexType - if err != nil { - t.Errorf("%v", err) - } - - actIndexConfigMap, err := client.GetIndexConfig() - if err != nil { - t.Fatalf("%v", err) - } - - actIndexType := actIndexConfigMap["index_type"].(string) - - if !reflect.DeepEqual(expIndexType, actIndexType) { - t.Errorf("expected content to see %v, saw %v", expIndexType, actIndexType) - } -} - -func TestServer_GetIndexStorageType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - 
_ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStorageType := indexConfig.IndexStorageType - if err != nil { - t.Errorf("%v", err) - } - - actIndexConfigMap, err := client.GetIndexConfig() - if err != nil { - t.Fatalf("%v", err) - } - - actIndexStorageType := actIndexConfigMap["index_storage_type"].(string) - - if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { - t.Errorf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) - } -} - -func TestServer_GetIndexStats(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := 
NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStats := map[string]interface{}{ - "index": map[string]interface{}{ - "analysis_time": float64(0), - "batches": float64(0), - "deletes": float64(0), - "errors": float64(0), - "index_time": float64(0), - "num_plain_text_bytes_indexed": float64(0), - "term_searchers_finished": float64(0), - "term_searchers_started": float64(0), - "updates": float64(0), - }, - "search_time": float64(0), - "searches": float64(0), - } - - actIndexStats, err := client.GetIndexStats() - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(expIndexStats, actIndexStats) { - t.Errorf("expected content to see %v, saw %v", expIndexStats, actIndexStats) - } -} - -func TestServer_PutDocument(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, 
logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // put document - docs := make([]map[string]interface{}, 0) - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - docFile1, err := os.Open(docPath1) - if err != nil { - t.Errorf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Errorf("%v", err) - } - var docFields1 map[string]interface{} - err = json.Unmarshal(docBytes1, &docFields1) - if err != nil { - t.Errorf("%v", err) - } - doc1 := map[string]interface{}{ - "id": "doc1", - "fields": docFields1, - } - docs = append(docs, doc1) - count, err := client.IndexDocument(docs) - if err != nil { - t.Errorf("%v", err) - } - - expCount := 1 - actCount := count - - if expCount != actCount { - t.Errorf("expected content to see %v, saw %v", expCount, actCount) - } -} - -func TestServer_GetDocument(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), 
"upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // put document - putDocs := make([]map[string]interface{}, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) - if err != nil { - t.Errorf("%v", err) - } - defer func() { - _ = putDocFile1.Close() - }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) - if err != nil { - t.Errorf("%v", err) - } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) - if err != nil { - t.Errorf("%v", err) - } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, - } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) - if err != nil { - t.Errorf("%v", err) - } - - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - - // get document - getDocFields1, err := client.GetDocument("doc1") - if err != nil { - t.Errorf("%v", err) - } - expGetDocFields1 := putDocFields1 - actGetDocFields1 := getDocFields1 - if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Errorf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) - } - - // get non-existing document - getDocFields2, err := client.GetDocument("doc2") - if err != errors.ErrNotFound { - t.Errorf("%v", err) - } - if getDocFields2 != 
nil { - t.Errorf("expected content to see nil, saw %v", getDocFields2) - } -} - -func TestServer_DeleteDocument(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // put document - putDocs := make([]map[string]interface{}, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) - if err != nil { - t.Errorf("%v", err) - } - defer func() { - _ = putDocFile1.Close() - }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) - if err != nil { - t.Errorf("%v", err) - } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) - if err != nil { - t.Errorf("%v", err) - } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, - } - putDocs = 
append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) - if err != nil { - t.Errorf("%v", err) - } - - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - - // get document - getDocFields1, err := client.GetDocument("doc1") - if err != nil { - t.Errorf("%v", err) - } - expGetDocFields1 := putDocFields1 - actGetDocFields1 := getDocFields1 - if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Errorf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) - } - - // get non-existing document - getDocFields2, err := client.GetDocument("non-existing") - if err != errors.ErrNotFound { - t.Errorf("%v", err) - } - if getDocFields2 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields2) - } - - // delete document - delCount, err := client.DeleteDocument([]string{"doc1"}) - if err != nil { - t.Errorf("%v", err) - } - expDelCount := 1 - actDelCount := delCount - if expDelCount != actDelCount { - t.Errorf("expected content to see %v, saw %v", expDelCount, actDelCount) - } - - // get document - getDocFields1, err = client.GetDocument("doc1") - if err != errors.ErrNotFound { - t.Errorf("%v", err) - } - if getDocFields1 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields1) - } - - // delete non-existing document - getDocFields1, err = client.GetDocument("non-existing") - if err != errors.ErrNotFound { - t.Errorf("%v", err) - } - if getDocFields1 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields1) - } -} - -func TestServer_Search(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create 
cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Errorf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // put document - putDocs := make([]map[string]interface{}, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) - if err != nil { - t.Errorf("%v", err) - } - defer func() { - _ = putDocFile1.Close() - }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) - if err != nil { - t.Errorf("%v", err) - } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) - if err != nil { - t.Errorf("%v", err) - } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, - } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) - if err != nil { - t.Errorf("%v", err) - } - - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - - // search - searchRequestPath := filepath.Join(curDir, "../example/wiki_search_request.json") - - searchRequestFile, err := os.Open(searchRequestPath) - if err != nil { - 
t.Errorf("%v", err) - } - defer func() { - _ = searchRequestFile.Close() - }() - - searchRequestByte, err := ioutil.ReadAll(searchRequestFile) - if err != nil { - t.Errorf("%v", err) - } - - searchRequest := bleve.NewSearchRequest(nil) - err = json.Unmarshal(searchRequestByte, searchRequest) - if err != nil { - t.Errorf("%v", err) - } - - searchResult1, err := client.Search(searchRequest) - if err != nil { - t.Errorf("%v", err) - } - expTotal := uint64(1) - actTotal := searchResult1.Total - if expTotal != actTotal { - t.Errorf("expected content to see %v, saw %v", expTotal, actTotal) - } -} - -func TestCluster_Start(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), 
logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := 
testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // liveness check for server1 - liveness1, err := client1.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness1 := liveness1 - if expLiveness1 != actLiveness1 { - t.Errorf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // liveness check for server2 - liveness2, err := client2.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness2 := liveness2 
- if expLiveness2 != actLiveness2 { - t.Errorf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // liveness check for server3 - liveness3, err := client3.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness3 := liveness3 - if expLiveness3 != actLiveness3 { - t.Errorf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } -} - -func TestCluster_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - 
t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // readiness check for server1 - readiness1, err := client1.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() - actReadiness1 := readiness1 - if expReadiness1 != actReadiness1 { - t.Errorf("expected content to see %v, saw %v", expReadiness1, actReadiness1) - } - - // readiness check for server2 - readiness2, err := client2.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() - actReadiness2 := readiness2 - if expReadiness2 != actReadiness2 { - t.Errorf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // readiness check for server3 - readiness3, err := client3.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() - 
actReadiness3 := readiness3 - if expReadiness3 != actReadiness3 { - t.Errorf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create 
server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // get all node info from all nodes - node11, err := client1.GetNode(nodeConfig1.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode11 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode11 := node11 - if !reflect.DeepEqual(expNode11, actNode11) { - t.Errorf("expected content to see %v, saw %v", expNode11, actNode11) - } - - node12, err := client1.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode12 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode12 := node12 - if !reflect.DeepEqual(expNode12, actNode12) { - t.Errorf("expected content to see %v, saw %v", expNode12, actNode12) - } - - node13, err := client1.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode13 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode13 := node13 - if !reflect.DeepEqual(expNode13, actNode13) { - t.Errorf("expected content to see %v, saw %v", expNode13, actNode13) - } - - node21, err := client2.GetNode(nodeConfig1.NodeId) - 
if err != nil { - t.Errorf("%v", err) - } - expNode21 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode21 := node21 - if !reflect.DeepEqual(expNode21, actNode21) { - t.Errorf("expected content to see %v, saw %v", expNode21, actNode21) - } - - node22, err := client2.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode22 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode22 := node22 - if !reflect.DeepEqual(expNode22, actNode22) { - t.Errorf("expected content to see %v, saw %v", expNode22, actNode22) - } - - node23, err := client2.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode23 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode23 := node23 - if !reflect.DeepEqual(expNode23, actNode23) { - t.Errorf("expected content to see %v, saw %v", expNode23, actNode23) - } - - node31, err := client3.GetNode(nodeConfig1.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode31 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode31 := node31 - if !reflect.DeepEqual(expNode31, actNode31) { - t.Errorf("expected content to see %v, saw %v", expNode31, actNode31) - } - - node32, err := client3.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode32 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode32 := node32 - if !reflect.DeepEqual(expNode32, actNode32) { - t.Errorf("expected content to see %v, saw %v", expNode32, actNode32) - } - - node33, err := client3.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode33 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - 
actNode33 := node33 - if !reflect.DeepEqual(expNode33, actNode33) { - t.Errorf("expected content to see %v, saw %v", expNode33, actNode33) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - 
server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // get cluster info from all servers - cluster1, err := client1.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster1 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actCluster1 := cluster1 - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Errorf("expected content to see %v, saw %v", expCluster1, actCluster1) - } - - cluster2, err := client2.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster2 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - 
actCluster2 := cluster2 - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Errorf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - cluster3, err := client3.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster3 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actCluster3 := cluster3 - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Errorf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} diff --git a/logutils/logger.go b/log/log.go similarity index 54% rename from logutils/logger.go rename to log/log.go index 28611dd..5470fdf 100644 --- a/logutils/logger.go +++ b/log/log.go @@ -1,22 +1,10 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logutils +package log import ( "os" + "strconv" + accesslog "github.com/mash/go-accesslog" "github.com/natefinch/lumberjack" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -42,9 +30,12 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup } var ws zapcore.WriteSyncer - if logFilename == "" { + switch logFilename { + case "", os.Stderr.Name(): ws = zapcore.AddSync(os.Stderr) - } else { + case os.Stdout.Name(): + ws = zapcore.AddSync(os.Stdout) + default: ws = zapcore.AddSync( &lumberjack.Logger{ Filename: logFilename, @@ -74,7 +65,43 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup ), zap.AddCaller(), //zap.AddStacktrace(ll), - ) + ).Named("blast") return logger } + +type HTTPLogger struct { + Logger *zap.Logger +} + +func (l HTTPLogger) Log(record accesslog.LogRecord) { + // Output log that formatted Apache combined. + size := "-" + if record.Size > 0 { + size = strconv.FormatInt(record.Size, 10) + } + + referer := "-" + if record.RequestHeader.Get("Referer") != "" { + referer = record.RequestHeader.Get("Referer") + } + + userAgent := "-" + if record.RequestHeader.Get("User-Agent") != "" { + userAgent = record.RequestHeader.Get("User-Agent") + } + + l.Logger.Info( + "", + zap.String("ip", record.Ip), + zap.String("username", record.Username), + zap.String("time", record.Time.Format("02/Jan/2006 03:04:05 +0000")), + zap.String("method", record.Method), + zap.String("uri", record.Uri), + zap.String("protocol", record.Protocol), + zap.Int("status", record.Status), + zap.String("size", size), + zap.String("referer", referer), + zap.String("user_agent", userAgent), + ) +} diff --git a/logutils/grpc_logger.go b/logutils/grpc_logger.go deleted file mode 100644 index 85d6fa9..0000000 --- a/logutils/grpc_logger.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "os" - - "github.com/natefinch/lumberjack" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewGRPCLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackups int, logMaxAge int, logCompress bool) *zap.Logger { - var ll zapcore.Level - switch logLevel { - case "DEBUG": - ll = zap.DebugLevel - case "INFO": - ll = zap.InfoLevel - case "WARN", "WARNING": - ll = zap.WarnLevel - case "ERR", "ERROR": - ll = zap.WarnLevel - case "DPANIC": - ll = zap.DPanicLevel - case "PANIC": - ll = zap.PanicLevel - case "FATAL": - ll = zap.FatalLevel - } - - var ws zapcore.WriteSyncer - if logFilename == "" { - ws = zapcore.AddSync(os.Stderr) - } else { - ws = zapcore.AddSync( - &lumberjack.Logger{ - Filename: logFilename, - MaxSize: logMaxSize, // megabytes - MaxBackups: logMaxBackups, - MaxAge: logMaxAge, // days - Compress: logCompress, - }, - ) - } - - ec := zap.NewProductionEncoderConfig() - ec.TimeKey = "_timestamp_" - ec.LevelKey = "_level_" - ec.NameKey = "_name_" - ec.CallerKey = "_caller_" - ec.MessageKey = "_message_" - ec.StacktraceKey = "_stacktrace_" - ec.EncodeTime = zapcore.ISO8601TimeEncoder - ec.EncodeCaller = zapcore.ShortCallerEncoder - - logger := zap.New( - zapcore.NewCore( - zapcore.NewJSONEncoder(ec), - ws, - ll, - ), - //zap.AddCaller(), - //zap.AddStacktrace(ll), - ) - - return logger -} diff --git a/logutils/http_logger.go b/logutils/http_logger.go deleted file mode 100644 index bb4371f..0000000 --- a/logutils/http_logger.go +++ /dev/null @@ -1,90 +0,0 @@ 
-// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "io" - "log" - "os" - "strconv" - - accesslog "github.com/mash/go-accesslog" - "github.com/natefinch/lumberjack" -) - -func NewFileWriter(filename string, maxSize int, maxBackups int, maxAge int, compress bool) io.Writer { - var writer io.Writer - - switch filename { - case "", os.Stderr.Name(): - writer = os.Stderr - case os.Stdout.Name(): - writer = os.Stdout - default: - writer = &lumberjack.Logger{ - Filename: filename, - MaxSize: maxSize, // megabytes - MaxBackups: maxBackups, - MaxAge: maxAge, // days - Compress: compress, // disabled by default - } - } - - return writer -} - -type ApacheCombinedLogger struct { - logger *log.Logger -} - -func NewApacheCombinedLogger(filename string, maxSize int, maxBackups int, maxAge int, compress bool) *ApacheCombinedLogger { - writer := NewFileWriter(filename, maxSize, maxBackups, maxAge, compress) - return &ApacheCombinedLogger{ - logger: log.New(writer, "", 0), - } -} - -func (l ApacheCombinedLogger) Log(record accesslog.LogRecord) { - // Output log that formatted Apache combined. 
- size := "-" - if record.Size > 0 { - size = strconv.FormatInt(record.Size, 10) - } - - referer := "-" - if record.RequestHeader.Get("Referer") != "" { - referer = record.RequestHeader.Get("Referer") - } - - userAgent := "-" - if record.RequestHeader.Get("User-Agent") != "" { - userAgent = record.RequestHeader.Get("User-Agent") - } - - l.logger.Printf( - "%s - %s [%s] \"%s %s %s\" %d %s \"%s\" \"%s\" %.4f", - record.Ip, - record.Username, - record.Time.Format("02/Jan/2006 03:04:05 +0000"), - record.Method, - record.Uri, - record.Protocol, - record.Status, - size, - referer, - userAgent, - record.ElapsedTime.Seconds(), - ) -} diff --git a/main.go b/main.go new file mode 100644 index 0000000..3ad98ef --- /dev/null +++ b/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "os" + + "github.com/mosuka/blast/cmd" +) + +func main() { + if err := cmd.Execute(); err != nil { + os.Exit(1) + } + + os.Exit(0) +} diff --git a/manager/grpc_service.go b/manager/grpc_service.go deleted file mode 100644 index 3b6a0f4..0000000 --- a/manager/grpc_service.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "errors" - "reflect" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/protobuf" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - *grpc.Service - - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers map[string]interface{} - peerClients map[string]*grpc.Client - cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} - clusterMutex sync.RWMutex - - stateChans map[chan protobuf.WatchStoreResponse]struct{} - stateMutex sync.RWMutex -} - -func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - raftServer: raftServer, - logger: logger, - - peers: make(map[string]interface{}, 0), - peerClients: make(map[string]*grpc.Client, 0), - cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan protobuf.GetClusterResponse]struct{}), - - stateChans: make(map[chan protobuf.WatchStoreResponse]struct{}), - }, nil -} - -func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - s.stopUpdateCluster() - - return nil -} - -func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { - var client *grpc.Client - - for id, node := range s.cluster { - state, ok := node.(map[string]interface{})["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() { - client, ok = s.peerClients[id] - if ok { - break - } 
else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("not a leader", zap.String("id", id)) - } - } - - if client == nil { - err := errors.New("there is no leader") - s.logger.Error(err.Error()) - return nil, err - } - - return client, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - for { - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - peers := make(map[string]interface{}, 0) - for nodeId, node := range cluster { - if nodeId != s.NodeID() { - peers[nodeId] = node - } - } - - if !reflect.DeepEqual(s.peers, peers) { - // open clients - for nodeId, nodeInfo := range peers { - nodeConfig, ok := nodeInfo.(map[string]interface{})["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId), zap.Any("node_info", nodeInfo)) - continue - } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.peerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.peerClients, 
nodeId) - - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.peerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := grpc.NewClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - if peerClient != nil { - s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) - s.peerClients[nodeId] = peerClient - } - } - } - - // close nonexistent clients - for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.peerClients, nodeId) - } - } - - // keep current peer nodes - s.logger.Debug("current peers", zap.Any("peers", peers)) - s.peers = peers - } - - // notify current cluster - if !reflect.DeepEqual(s.cluster, cluster) { - // convert to GetClusterResponse for channel output - clusterResp := &protobuf.GetClusterResponse{} - clusterAny 
:= &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Warn(err.Error()) - } - clusterResp.Cluster = clusterAny - - // output to channel - for c := range s.clusterChans { - c <- *clusterResp - } - - // keep current cluster - s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - s.cluster = cluster - } - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - return map[string]interface{}{ - "node_config": s.raftServer.nodeConfig.ToMap(), - "state": s.raftServer.State().String(), - }, nil -} - -func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error - - if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = peerClient.GetNode(id) - if err != nil { - s.logger.Warn(err.Error()) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - } else { - s.logger.Warn("node does not exist in peer list", zap.String("id", id)) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - - return nodeInfo, nil -} - -func (s *GRPCService) getNode(id string) (map[string]interface{}, 
error) { - var nodeInfo map[string]interface{} - var err error - - if id == "" || id == s.NodeID() { - nodeInfo, err = s.getSelfNode() - } else { - nodeInfo, err = s.getPeerNode(id) - } - - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return nodeInfo, nil -} - -func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - resp := &protobuf.GetNodeResponse{} - - nodeInfo, err := s.getNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfigAny := &any.Any{} - if nodeConfig, exist := nodeInfo["node_config"]; exist { - err = protobuf.UnmarshalAny(nodeConfig.(map[string]interface{}), nodeConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - s.logger.Error("missing node_config", zap.Any("node_config", nodeConfig)) - } - - state, exist := nodeInfo["state"].(string) - if !exist { - s.logger.Error("missing node state", zap.String("state", state)) - state = raft.Shutdown.String() - } - - resp.NodeConfig = nodeConfigAny - resp.State = state - - return resp, nil -} - -func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = client.SetNode(id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - ins, err := protobuf.MarshalAny(req.NodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - 
- nodeConfig := *ins.(*map[string]interface{}) - - err = s.setNode(req.Id, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = client.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (map[string]interface{}, error) { - cluster, err := s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update node state - for nodeId := range cluster { - node, err := s.getNode(nodeId) - if err != nil { - s.logger.Error(err.Error()) - } - state := node["state"].(string) - - if _, ok := cluster[nodeId]; !ok { - cluster[nodeId] = map[string]interface{}{} - } - nodeInfo := cluster[nodeId].(map[string]interface{}) - nodeInfo["state"] = state - } - - return cluster, nil -} - -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - resp := &protobuf.GetClusterResponse{} - - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = clusterAny - - return resp, nil -} - -func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - chans := make(chan protobuf.GetClusterResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) GetValue(ctx context.Context, req *protobuf.GetValueRequest) (*protobuf.GetValueResponse, error) { - s.stateMutex.RLock() - defer func() { - s.stateMutex.RUnlock() - }() - - resp := &protobuf.GetValueResponse{} - - value, err := s.raftServer.GetValue(req.Key) - if err != nil { - s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(value, valueAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Value = valueAny - - return resp, nil -} - -func (s *GRPCService) SetValue(ctx context.Context, req *protobuf.SetValueRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - value, err := 
protobuf.MarshalAny(req.Value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - if s.raftServer.IsLeader() { - err = s.raftServer.SetValue(req.Key, value) - if err != nil { - s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - err = client.SetValue(req.Key, value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - // notify - for c := range s.stateChans { - c <- protobuf.WatchStoreResponse{ - Command: protobuf.WatchStoreResponse_SET, - Key: req.Key, - Value: req.Value, - } - } - - return resp, nil -} - -func (s *GRPCService) DeleteValue(ctx context.Context, req *protobuf.DeleteValueRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteValue(req.Key) - if err != nil { - s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - err = client.DeleteValue(req.Key) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - // notify - for c := range s.stateChans { - c <- protobuf.WatchStoreResponse{ - Command: protobuf.WatchStoreResponse_DELETE, - Key: req.Key, - } - } - - return resp, nil -} - 
-func (s *GRPCService) WatchStore(req *protobuf.WatchStoreRequest, server protobuf.Blast_WatchStoreServer) error { - chans := make(chan protobuf.WatchStoreResponse) - - s.stateMutex.Lock() - s.stateChans[chans] = struct{}{} - s.stateMutex.Unlock() - - defer func() { - s.stateMutex.Lock() - delete(s.stateChans, chans) - s.stateMutex.Unlock() - close(chans) - }() - - for resp := range chans { - if !strings.HasPrefix(resp.Key, req.Key) { - continue - } - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} diff --git a/manager/http_router.go b/manager/http_router.go deleted file mode 100644 index 969cccf..0000000 --- a/manager/http_router.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/gorilla/mux" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) - if err != nil { - return nil, err - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/configs", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/configs/{path:.*}", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs/{path:.*}", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs/{path:.*}", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type GetHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewGetHandler(client 
*grpc.Client, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - value, err := h.client.GetValue(key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // interface{} -> []byte - content, err = json.MarshalIndent(value, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type PutHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewPutHandler(client *grpc.Client, logger *zap.Logger) *PutHandler { - return &PutHandler{ - client: client, - logger: logger, - } -} - -func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = 
blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // string -> map[string]interface{} - var value interface{} - err = json.Unmarshal(bodyBytes, &value) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - err = h.client.SetValue(key, value) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *grpc.Client - logger *zap.Logger -} - -func NewDeleteHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - err := h.client.DeleteValue(key) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/manager/raft_command.go b/manager/raft_command.go deleted file mode 100644 
index 97fa3df..0000000 --- a/manager/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - setKeyValue - deleteKeyValue -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go deleted file mode 100644 index d918e62..0000000 --- a/manager/raft_fsm.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - logger *zap.Logger - - metadata maputils.Map - metadataMutex sync.RWMutex - - data maputils.Map -} - -func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - f.logger.Info("initialize metadata") - f.metadata = maputils.Map{} - - f.logger.Info("initialize store data") - f.data = maputils.Map{} - - return nil -} - -func (f *RaftFSM) Stop() error { - return nil -} - -func (f *RaftFSM) GetNodeConfig(nodeId string) (map[string]interface{}, error) { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() - - nodeConfig, err := f.metadata.Get(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - if err == maputils.ErrNotFound { - return nil, blasterrors.ErrNotFound - } - return nil, err - } - - return nodeConfig.(maputils.Map).ToMap(), nil -} - -func (f *RaftFSM) SetNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() - - err := f.metadata.Merge(nodeId, nodeConfig) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - return err - } - - return nil -} - -func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() - - err := f.metadata.Delete(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - return err - } - - return nil -} - -func (f *RaftFSM) GetValue(key string) (interface{}, error) { - value, err := f.data.Get(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the 
store data", zap.String("key", key)) - return nil, blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return nil, err - } - } - - var ret interface{} - switch value.(type) { - case maputils.Map: - ret = value.(maputils.Map).ToMap() - default: - ret = value - } - - return ret, nil -} - -func (f *RaftFSM) SetValue(key string, value interface{}, merge bool) error { - if merge { - err := f.data.Merge(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } else { - err := f.data.Set(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } - - return nil -} - -func (f *RaftFSM) DeleteValue(key string) error { - err := f.data.Delete(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the store data", zap.String("key", key)) - return blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return err - } - } - - return nil -} - -type fsmResponse struct { - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - switch msg.Command { - case setNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetNodeConfig(data["node_id"].(string), data["node_config"].(map[string]interface{})) - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteNodeConfig(data["node_id"].(string)) - return &fsmResponse{error: err} - case setKeyValue: - 
var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetValue(data["key"].(string), data["value"], true) - return &fsmResponse{error: err} - case deleteKeyValue: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteValue(data["key"].(string)) - return &fsmResponse{error: err} - default: - err = errors.New("unsupported command") - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - data: f.data, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - err = json.Unmarshal(data, &f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -type RaftFSMSnapshot struct { - data maputils.Map - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - buff, err := json.Marshal(f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - _, err = sink.Write(buff) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go deleted file mode 100644 index 1107951..0000000 --- a/manager/raft_fsm_test.go +++ /dev/null @@ -1,505 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed 
under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "io/ioutil" - "os" - "reflect" - "testing" - - "github.com/mosuka/blast/logutils" -) - -func TestRaftFSM_GetNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) - - val1, err := fsm.GetNodeConfig("node2") - if err != nil { - t.Errorf("%v", err) - } - - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - -} - -func TestRaftFSM_SetNode(t *testing.T) { - tmp, err := 
ioutil.TempDir("", "") - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Errorf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Errorf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Errorf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) - - val1, err := fsm.GetNodeConfig("node2") - if err != nil { - t.Errorf("%v", err) - } - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - "leader": true, - }) - - val2, err := fsm.GetNodeConfig("node2") - if err != nil { - t.Errorf("%v", err) - } - exp2 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - "leader": true, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestRaftFSM_DeleteNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Errorf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := 
NewRaftFSM(tmp, logger) - if err != nil { - t.Errorf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Errorf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) - - val1, err := fsm.GetNodeConfig("node2") - if err != nil { - t.Errorf("%v", err) - } - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - err = fsm.DeleteNodeConfig("node2") - if err != nil { - t.Errorf("%v", err) - } - - val2, err := fsm.GetNodeConfig("node2") - if err == nil { - t.Errorf("expected error: %v", err) - } - - act1 = val2 - if reflect.DeepEqual(nil, act1) { - t.Errorf("expected content to see nil, saw %v", act1) - } -} - -func TestRaftFSM_Get(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Errorf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Errorf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Errorf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Errorf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Errorf("%v", err) - } - - expectedValue := 1 - actualValue := 
value - if expectedValue != actualValue { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestRaftFSM_Set(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Errorf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Errorf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Errorf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, - }, false) - if err != nil { - t.Errorf("%v", err) - } - val1, err := fsm.GetValue("/") - if err != nil { - t.Errorf("%v", err) - } - exp1 := map[string]interface{}{ - "a": 1, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - // merge {"a": "A"} - _ = fsm.SetValue("/", map[string]interface{}{ - "a": "A", - }, true) - if err != nil { - t.Errorf("%v", err) - } - val2, err := fsm.GetValue("/") - if err != nil { - t.Errorf("%v", err) - } - exp2 := map[string]interface{}{ - "a": "A", - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - // set {"a": {"b": "AB"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - }, false) - if err != nil { - t.Errorf("%v", err) - } - - val3, err := fsm.GetValue("/") - if err != nil { - t.Errorf("%v", err) - } - exp3 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - } - act3 := val3 - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) - } - - // merge {"a": {"c": "AC"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "c": "AC", - 
}, - }, true) - if err != nil { - t.Errorf("%v", err) - } - val4, err := fsm.GetValue("/") - if err != nil { - t.Errorf("%v", err) - } - exp4 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - "c": "AC", - }, - } - act4 := val4 - if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, - }, false) - if err != nil { - t.Errorf("%v", err) - } - val5, err := fsm.GetValue("/") - if err != nil { - t.Errorf("%v", err) - } - exp5 := map[string]interface{}{ - "a": 1, - } - act5 := val5 - if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) - } - - // TODO: merge {"a": {"c": "AC"}} - //fsm.applySet("/", map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //}, true) - //val6, err := fsm.Get("/") - //if err != nil { - // t.Errorf("%v", err) - //} - //exp6 := map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //} - //act6 := val6 - //if !reflect.DeepEqual(exp6, act6) { - // t.Errorf("expected content to see %v, saw %v", exp6, act6) - //} -} - -func TestRaftFSM_Delete(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Errorf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Errorf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Errorf("%v", err) - } - }() - if err != nil { - t.Errorf("%v", err) - } - - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Errorf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Errorf("%v", err) - } - - expectedValue := 1 - actualValue := value - if expectedValue != actualValue { - 
t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } - - err = fsm.DeleteValue("/a") - if err != nil { - t.Errorf("%v", err) - } - - value, err = fsm.GetValue("/a") - if err == nil { - t.Errorf("expected nil: %v", err) - } - - actualValue = value - if nil != actualValue { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} diff --git a/manager/raft_server.go b/manager/raft_server.go deleted file mode 100644 index fe29955..0000000 --- a/manager/raft_server.go +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - raftbadgerdb "github.com/markthethomas/raft-badger" - _ "github.com/mosuka/blast/builtins" - "github.com/mosuka/blast/config" - blasterrors "github.com/mosuka/blast/errors" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger - - raft *raft.Raft - fsm *RaftFSM - mu sync.RWMutex -} - -func NewRaftServer(nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - nodeConfig: nodeConfig, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.nodeConfig.DataDir, "store") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("node_id", s.nodeConfig.NodeId)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeConfig.NodeId) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.nodeConfig.BindAddr)) - addr, err := net.ResolveTCPAddr("tcp", s.nodeConfig.BindAddr) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.nodeConfig.BindAddr)) - transport, err := raft.NewTCPTransport(s.nodeConfig.BindAddr, addr, 3, 
10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.nodeConfig.DataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.nodeConfig.RaftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - case "badger": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable") - s.logger.Info("create raft stable store", 
zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - default: - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - Address: transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own node config", zap.String("node_id", s.nodeConfig.NodeId), 
zap.Any("node_config", s.nodeConfig)) - err = s.setNodeConfig(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set index config - s.logger.Info("register index config") - err := s.SetValue("index_config", s.indexConfig.ToMap()) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeID() string { - return s.nodeConfig.NodeId -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - 
return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNodeConfig(nodeId string) (map[string]interface{}, error) { - nodeConfig, err := s.fsm.GetNodeConfig(nodeId) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node_id": nodeId, - "node_config": nodeConfig, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) deleteNodeConfig(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "node_id": nodeId, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - node := 
make(map[string]interface{}, 0) - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - nodeConfig, err := s.getNodeConfig(id) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - node["node_config"] = nodeConfig - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) - return nil - } - } - - bindAddr, ok := nodeConfig["bind_addr"].(string) - if !ok { - s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) - return errors.New("missing metadata") - } - - // add node to Raft cluster - s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.String("address", bindAddr)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(bindAddr), 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - // set node config - err = s.setNodeConfig(nodeId, nodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Debug("remove server", zap.String("node_id", nodeId)) - f 
:= s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - } - - // delete node config - err = s.deleteNodeConfig(nodeId) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := map[string]interface{}{} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Warn(err.Error()) - node = map[string]interface{}{} - } - cluster[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetValue(key string) (interface{}, error) { - value, err := s.fsm.GetValue(key) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return value, nil -} - -func (s *RaftServer) SetValue(key string, value interface{}) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - msg, err := newMessage( - setKeyValue, - map[string]interface{}{ - "key": key, - "value": value, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) DeleteValue(key string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), 
zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - msg, err := newMessage( - deleteKeyValue, - map[string]interface{}{ - "key": key, - }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - msgBytes, err := json.Marshal(msg) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} diff --git a/manager/server.go b/manager/server.go deleted file mode 100644 index bbef34b..0000000 --- a/manager/server.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" - "go.uber.org/zap" -) - -type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server -} - -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // bootstrap node? 
- bootstrap := s.clusterConfig.PeerAddr == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - // create raft server - s.raftServer, err = NewRaftServer(s.nodeConfig, s.indexConfig, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if !bootstrap { - client, err := grpc.NewClient(s.clusterConfig.PeerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - err = client.SetNode(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - -func (s *Server) Stop() { - 
s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/manager/server_test.go b/manager/server_test.go deleted file mode 100644 index 77e8eff..0000000 --- a/manager/server_test.go +++ /dev/null @@ -1,2258 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - 
_ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // liveness - liveness, err := client.LivenessProbe() - if err != nil { - t.Fatalf("%v", err) - } - expLiveness := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } -} - -func TestServer_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, 
httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // readiness - readiness, err := client.ReadinessProbe() - if err != nil { - t.Fatalf("%v", err) - } - expReadiness := protobuf.ReadinessProbeResponse_READY.String() - actReadiness := readiness - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil 
{ - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - nodeInfo, err := client.GetNode(nodeConfig.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNodeInfo := map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", - } - actNodeInfo := nodeInfo - if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Errorf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - cluster, err := client.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster := map[string]interface{}{ - nodeConfig.NodeId: map[string]interface{}{ - "node_config": 
nodeConfig.ToMap(), - "state": "Leader", - }, - } - actCluster := cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) - } -} - -func TestServer_GetIndexMapping(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexMapping := indexConfig.IndexMapping - if err != nil { - t.Fatalf("%v", err) - } - - actIntr, err := client.GetValue("index_config/index_mapping") - if err != nil { - t.Fatalf("%v", err) - } - - actIndexMapping, err := indexutils.NewIndexMappingFromMap(*actIntr.(*map[string]interface{})) - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Errorf("expected content to see %v, saw %v", 
expIndexMapping, actIndexMapping) - } -} - -func TestServer_GetIndexType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexType := indexConfig.IndexType - if err != nil { - t.Errorf("%v", err) - } - - actIndexType, err := client.GetValue("index_config/index_type") - if err != nil { - t.Errorf("%v", err) - } - - if expIndexType != *actIndexType.(*string) { - t.Errorf("expected content to see %v, saw %v", expIndexType, *actIndexType.(*string)) - } -} - -func TestServer_GetIndexStorageType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - 
httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStorageType := indexConfig.IndexStorageType - if err != nil { - t.Errorf("%v", err) - } - - actIndexStorageType, err := client.GetValue("index_config/index_storage_type") - if err != nil { - t.Errorf("%v", err) - } - - if expIndexStorageType != *actIndexStorageType.(*string) { - t.Errorf("expected content to see %v, saw %v", expIndexStorageType, *actIndexStorageType.(*string)) - } -} - -func TestServer_SetState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - 
}() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - err = client.SetValue("test/key1", "val1") - if err != nil { - t.Errorf("%v", err) - } - - // get value - val1, err := client.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - - expVal1 := "val1" - - actVal1 := *val1.(*string) - - if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_GetState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, 
grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - err = client.SetValue("test/key1", "val1") - if err != nil { - t.Errorf("%v", err) - } - - // get value - val1, err := client.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - - expVal1 := "val1" - - actVal1 := *val1.(*string) - - if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_DeleteState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) - defer 
func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - err = client.SetValue("test/key1", "val1") - if err != nil { - t.Errorf("%v", err) - } - - // get value - val1, err := client.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - - expVal1 := "val1" - - actVal1 := *val1.(*string) - - if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) - } - - // delete value - err = client.DeleteValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - - val1, err = client.GetValue("test/key1") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - - if val1 != nil { - t.Errorf("%v", err) - } - - // delete non-existing data - err = client.DeleteValue("test/non-existing") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } -} - -func TestCluster_Start(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - 
clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, 
httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // liveness check for manager1 - liveness1, err := client1.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness1 := liveness1 - if 
expLiveness1 != actLiveness1 { - t.Errorf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // liveness check for manager2 - liveness2, err := client2.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness2 := liveness2 - if expLiveness2 != actLiveness2 { - t.Errorf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // liveness check for manager3 - liveness3, err := client3.LivenessProbe() - if err != nil { - t.Errorf("%v", err) - } - expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() - actLiveness3 := liveness3 - if expLiveness3 != actLiveness3 { - t.Errorf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } -} - -func TestCluster_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := 
testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // readiness check for manager1 - readiness1, err := client1.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() - actReadiness1 := readiness1 - if expReadiness1 != actReadiness1 { - t.Errorf("expected content to see %v, saw %v", expReadiness1, actReadiness1) - } - - // readiness check for manager2 - readiness2, err := client2.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() - 
actReadiness2 := readiness2 - if expReadiness2 != actReadiness2 { - t.Errorf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // readiness check for manager3 - readiness3, err := client3.ReadinessProbe() - if err != nil { - t.Errorf("%v", err) - } - expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() - actReadiness3 := readiness3 - if expReadiness3 != actReadiness3 { - t.Errorf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } 
- }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // get all node info from all nodes - node11, err := client1.GetNode(nodeConfig1.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode11 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode11 := node11 - if !reflect.DeepEqual(expNode11, actNode11) { - t.Errorf("expected content to see %v, saw %v", expNode11, actNode11) - } - - node12, err := client1.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode12 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode12 := node12 - if !reflect.DeepEqual(expNode12, actNode12) { - t.Errorf("expected content to see %v, saw %v", expNode12, actNode12) - } - - node13, err := client1.GetNode(nodeConfig3.NodeId) - if err != nil { - 
t.Errorf("%v", err) - } - expNode13 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode13 := node13 - if !reflect.DeepEqual(expNode13, actNode13) { - t.Errorf("expected content to see %v, saw %v", expNode13, actNode13) - } - - node21, err := client2.GetNode(nodeConfig1.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode21 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode21 := node21 - if !reflect.DeepEqual(expNode21, actNode21) { - t.Errorf("expected content to see %v, saw %v", expNode21, actNode21) - } - - node22, err := client2.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode22 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode22 := node22 - if !reflect.DeepEqual(expNode22, actNode22) { - t.Errorf("expected content to see %v, saw %v", expNode22, actNode22) - } - - node23, err := client2.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode23 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode23 := node23 - if !reflect.DeepEqual(expNode23, actNode23) { - t.Errorf("expected content to see %v, saw %v", expNode23, actNode23) - } - - node31, err := client3.GetNode(nodeConfig1.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode31 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), - } - actNode31 := node31 - if !reflect.DeepEqual(expNode31, actNode31) { - t.Errorf("expected content to see %v, saw %v", expNode31, actNode31) - } - - node32, err := client3.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode32 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode32 := node32 
- if !reflect.DeepEqual(expNode32, actNode32) { - t.Errorf("expected content to see %v, saw %v", expNode32, actNode32) - } - - node33, err := client3.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Errorf("%v", err) - } - expNode33 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode33 := node33 - if !reflect.DeepEqual(expNode33, actNode33) { - t.Errorf("expected content to see %v, saw %v", expNode33, actNode33) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - 
} - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // get cluster info from manager1 - cluster1, err := client1.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster1 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actCluster1 := cluster1 - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Errorf("expected content to see %v, saw %v", expCluster1, actCluster1) - } - - cluster2, err := client2.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster2 := map[string]interface{}{ - nodeConfig1.NodeId: 
map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actCluster2 := cluster2 - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Errorf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - cluster3, err := client3.GetCluster() - if err != nil { - t.Errorf("%v", err) - } - expCluster3 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), - }, - } - actCluster3 := cluster3 - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Errorf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} - -func TestCluster_GetState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, 
indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // get index mapping from all nodes - indexConfig1, err := client1.GetValue("index_config") - if err != nil { - t.Errorf("%v", err) - } - expIndexConfig1 := indexConfig.ToMap() 
- actIndexConfig1 := *indexConfig1.(*map[string]interface{}) - if !reflect.DeepEqual(expIndexConfig1, actIndexConfig1) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig1, actIndexConfig1) - } - - indexConfig2, err := client2.GetValue("index_config") - if err != nil { - t.Errorf("%v", err) - } - expIndexConfig2 := indexConfig.ToMap() - actIndexConfig2 := *indexConfig2.(*map[string]interface{}) - if !reflect.DeepEqual(expIndexConfig2, actIndexConfig2) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig2, actIndexConfig2) - } - - indexConfig3, err := client3.GetValue("index_config") - if err != nil { - t.Errorf("%v", err) - } - expIndexConfig3 := indexConfig.ToMap() - actIndexConfig3 := *indexConfig3.(*map[string]interface{}) - if !reflect.DeepEqual(expIndexConfig3, actIndexConfig3) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig3, actIndexConfig3) - } -} - -func TestCluster_SetState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - 
clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - err = client1.SetValue("test/key1", "val1") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val11, err := client1.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if expVal11 != actVal11 { - t.Errorf("expected content to see %v, saw %v", expVal11, actVal11) - } - 
val21, err := client2.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if expVal21 != actVal21 { - t.Errorf("expected content to see %v, saw %v", expVal21, actVal21) - } - val31, err := client3.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if expVal31 != actVal31 { - t.Errorf("expected content to see %v, saw %v", expVal31, actVal31) - } - - err = client2.SetValue("test/key2", "val2") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val12, err := client1.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if expVal12 != actVal12 { - t.Errorf("expected content to see %v, saw %v", expVal12, actVal12) - } - val22, err := client2.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if expVal22 != actVal22 { - t.Errorf("expected content to see %v, saw %v", expVal22, actVal22) - } - val32, err := client3.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if expVal32 != actVal32 { - t.Errorf("expected content to see %v, saw %v", expVal32, actVal32) - } - - err = client3.SetValue("test/key3", "val3") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val13, err := client1.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if expVal13 != actVal13 { - t.Errorf("expected content to see %v, saw %v", expVal13, actVal13) - } - val23, err := client2.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if expVal23 != actVal23 { - 
t.Errorf("expected content to see %v, saw %v", expVal23, actVal23) - } - val33, err := client3.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if expVal33 != actVal33 { - t.Errorf("expected content to see %v, saw %v", expVal33, actVal33) - } -} - -func TestCluster_DeleteState(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server1 - server1.Start() - - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - 
clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Errorf("%v", err) - } - - // set test data before delete - err = client1.SetValue("test/key1", "val1") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val11, err := client1.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if expVal11 != actVal11 { - t.Errorf("expected content to see %v, saw %v", expVal11, actVal11) - } - val21, err := client2.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if expVal21 != actVal21 { - t.Errorf("expected content to see %v, saw %v", expVal21, actVal21) - } - val31, err := client3.GetValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if expVal31 != actVal31 { - t.Errorf("expected content to see %v, saw %v", expVal31, actVal31) - } - - err = client2.SetValue("test/key2", "val2") - if err != nil { - t.Errorf("%v", 
err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val12, err := client1.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if expVal12 != actVal12 { - t.Errorf("expected content to see %v, saw %v", expVal12, actVal12) - } - val22, err := client2.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if expVal22 != actVal22 { - t.Errorf("expected content to see %v, saw %v", expVal22, actVal22) - } - val32, err := client3.GetValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if expVal32 != actVal32 { - t.Errorf("expected content to see %v, saw %v", expVal32, actVal32) - } - - err = client3.SetValue("test/key3", "val3") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val13, err := client1.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if expVal13 != actVal13 { - t.Errorf("expected content to see %v, saw %v", expVal13, actVal13) - } - val23, err := client2.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if expVal23 != actVal23 { - t.Errorf("expected content to see %v, saw %v", expVal23, actVal23) - } - val33, err := client3.GetValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if expVal33 != actVal33 { - t.Errorf("expected content to see %v, saw %v", expVal33, actVal33) - } - - // delete - err = client1.DeleteValue("test/key1") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val11, err = client1.GetValue("test/key1") - if err != 
blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val11 != nil { - t.Errorf("%v", err) - } - val21, err = client2.GetValue("test/key1") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val21 != nil { - t.Errorf("%v", err) - } - val31, err = client3.GetValue("test/key1") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val31 != nil { - t.Errorf("%v", err) - } - - err = client2.DeleteValue("test/key2") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val12, err = client1.GetValue("test/key2") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val12 != nil { - t.Errorf("%v", err) - } - val22, err = client2.GetValue("test/key2") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val22 != nil { - t.Errorf("%v", err) - } - val32, err = client3.GetValue("test/key2") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val32 != nil { - t.Errorf("%v", err) - } - - err = client3.DeleteValue("test/key3") - if err != nil { - t.Errorf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - val13, err = client1.GetValue("test/key3") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val13 != nil { - t.Errorf("%v", err) - } - val23, err = client2.GetValue("test/key3") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val23 != nil { - t.Errorf("%v", err) - } - val33, err = client3.GetValue("test/key3") - if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) - } - if val33 != nil { - t.Errorf("%v", err) - } - - // delete non-existing data from manager1 - err = client1.DeleteValue("test/non-existing") - if err == nil { - t.Errorf("%v", err) - } - - // delete non-existing data from manager2 - err = client2.DeleteValue("test/non-existing") - if err == nil { - t.Errorf("%v", err) - } - - // delete 
non-existing data from manager3 - err = client3.DeleteValue("test/non-existing") - if err == nil { - t.Errorf("%v", err) - } -} diff --git a/indexutils/indexutils.go b/mapping/mapping.go similarity index 61% rename from indexutils/indexutils.go rename to mapping/mapping.go index 5c2dcfa..862cdf3 100644 --- a/indexutils/indexutils.go +++ b/mapping/mapping.go @@ -1,37 +1,25 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexutils +package mapping import ( "encoding/json" "io/ioutil" "os" - "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/v2/mapping" ) +func NewIndexMapping() *mapping.IndexMappingImpl { + return mapping.NewIndexMapping() +} + func NewIndexMappingFromBytes(indexMappingBytes []byte) (*mapping.IndexMappingImpl, error) { indexMapping := mapping.NewIndexMapping() - err := indexMapping.UnmarshalJSON(indexMappingBytes) - if err != nil { + if err := indexMapping.UnmarshalJSON(indexMappingBytes); err != nil { return nil, err } - err = indexMapping.Validate() - if err != nil { + if err := indexMapping.Validate(); err != nil { return nil, err } diff --git a/maputils/error.go b/maputils/error.go deleted file mode 100644 index 455c9fc..0000000 --- a/maputils/error.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import "errors" - -var ( - ErrNotFound = errors.New("not found") -) diff --git a/maputils/maputils.go b/maputils/maputils.go deleted file mode 100644 index 36ffe67..0000000 --- a/maputils/maputils.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package maputils - -import ( - "encoding/json" - "errors" - "strings" - - "github.com/imdario/mergo" - "github.com/stretchr/objx" - yaml "gopkg.in/yaml.v2" -) - -func splitKey(path string) []string { - keys := make([]string, 0) - for _, k := range strings.Split(path, "/") { - if k != "" { - keys = append(keys, k) - } - } - - return keys -} - -func makeSelector(key string) string { - return strings.Join(splitKey(key), objx.PathSeparator) -} - -func normalize(value interface{}) interface{} { - switch value.(type) { - case map[string]interface{}: - ret := Map{} - for k, v := range value.(map[string]interface{}) { - ret[k] = normalize(v) - } - return ret - case map[interface{}]interface{}: // when unmarshaled by yaml - ret := Map{} - for k, v := range value.(map[interface{}]interface{}) { - ret[k.(string)] = normalize(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, normalize(v)) - } - return ret - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func makeMap(path string, value interface{}) interface{} { - var ret interface{} - - keys := splitKey(path) - - if len(keys) >= 1 { - ret = Map{keys[0]: makeMap(strings.Join(keys[1:], "/"), value)} - } else if len(keys) == 0 { - ret = normalize(value) - } - - return ret -} - -type Map map[string]interface{} - -func New() Map { - return Map{} -} - -func FromMap(src map[string]interface{}) Map { - return normalize(src).(Map) -} - -func FromJSON(src []byte) (Map, error) { - t := map[string]interface{}{} - err := json.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func FromYAML(src []byte) (Map, error) { - t := map[string]interface{}{} - err := yaml.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func (m Map) 
Has(key string) (bool, error) { - _, err := m.Get(key) - if err != nil { - return false, err - } - - return true, nil -} - -func (m Map) Set(key string, value interface{}) error { - _ = m.Delete(key) - - err := m.Merge(key, value) - if err != nil { - return err - } - - return nil -} - -func (m Map) Merge(key string, value interface{}) error { - mm := makeMap(key, value).(Map) - - err := mergo.Merge(&m, mm, mergo.WithOverride) - if err != nil { - return err - } - - return nil -} - -func (m Map) Get(key string) (interface{}, error) { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - return tmpMap.(Map).ToMap(), nil - } - - iter := newIterator(splitKey(key)) - var value interface{} - for { - k, err := iter.value() - if err != nil { - return nil, err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return nil, ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } else { - value = tmpMap.(Map)[k] - break - } - } - - return value, nil -} - -func (m Map) Delete(key string) error { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - // clear map - err := m.Clear() - if err != nil { - return err - } - return nil - } - - iter := newIterator(splitKey(key)) - for { - k, err := iter.value() - if err != nil { - return err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } else { - delete(tmpMap.(Map), k) - break - } - } - - return nil -} - -func (m Map) Clear() error { - for k := range m { - delete(m, k) - } - - return nil -} - -func (m Map) toMap(value interface{}) interface{} { - switch value.(type) { - case Map: - ret := map[string]interface{}{} - for k, v := range value.(Map) { - ret[k] = m.toMap(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, m.toMap(v)) - } - return ret - case bool, string, 
int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func (m Map) ToMap() map[string]interface{} { - return m.toMap(m).(map[string]interface{}) -} - -func (m Map) ToJSON() ([]byte, error) { - mm := m.ToMap() - b, err := json.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -func (m Map) ToYAML() ([]byte, error) { - mm := m.ToMap() - b, err := yaml.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -type iterator struct { - keys []string - pos int -} - -func newIterator(keys []string) *iterator { - return &iterator{ - keys: keys, - pos: 0, - } -} - -func (i *iterator) hasNext() bool { - return i.pos < len(i.keys)-1 -} - -func (i *iterator) next() bool { - i.pos++ - return i.pos < len(i.keys)-1 -} - -func (i *iterator) value() (string, error) { - if i.pos > len(i.keys)-1 { - return "", errors.New("value is not valid after iterator finished") - } - return i.keys[i.pos], nil -} diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go deleted file mode 100644 index c6c175d..0000000 --- a/maputils/maputils_test.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package maputils - -import ( - "bytes" - "reflect" - "testing" -) - -func Test_splitKey(t *testing.T) { - key1 := "/a/b/c/d" - keys1 := splitKey(key1) - exp1 := []string{"a", "b", "c", "d"} - act1 := keys1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - keys2 := splitKey(key2) - exp2 := make([]string, 0) - act2 := keys2 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - keys3 := splitKey(key3) - exp3 := make([]string, 0) - act3 := keys3 - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_makeSelector(t *testing.T) { - key1 := "/a/b/c/d" - selector1 := makeSelector(key1) - exp1 := "a.b.c.d" - act1 := selector1 - if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - selector2 := makeSelector(key2) - exp2 := "" - act2 := selector2 - if exp2 != act2 { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - selector3 := makeSelector(key3) - exp3 := "" - act3 := selector3 - if exp3 != act3 { - t.Errorf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_normalize(t *testing.T) { - data1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := normalize(data1) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_makeMap(t *testing.T) { - val1 := makeMap("/a/b/c", "C").(Map) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, 
act1) - } - - val2 := makeMap("a/b", map[string]interface{}{"c": "C"}).(Map) - exp2 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestMap_FromMap(t *testing.T) { - map1 := FromMap(map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - }) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func TestMap_ToMap(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := map1.ToMap() - exp1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromYAML(t *testing.T) { - map1, err := FromYAML([]byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`)) - if err != nil { - t.Errorf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToYAML(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.ToYAML() - if err != nil { - t.Errorf("%v", err) - } - exp1 := []byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`) - act1 := val1 - if !bytes.Equal(exp1, act1) { 
- t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromJSON(t *testing.T) { - map1, err := FromJSON([]byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`)) - if err != nil { - t.Errorf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToJSON(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1, err := map1.ToJSON() - if err != nil { - t.Errorf("%v", err) - } - exp1 := []byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`) - act1 := val1 - if !bytes.Equal(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_Has(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Has("a/b/c") - if err != nil { - t.Errorf("%v", err) - } - exp1 := true - act1 := val1 - if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a/b/f") - if err != ErrNotFound { - t.Errorf("%v", err) - } - exp2 := false - act2 := val2 - if exp2 == act2 { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Set(t *testing.T) { - map1 := Map{} - - err := map1.Set("/", Map{"a": "A"}) - if err != nil { - t.Errorf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Set("/", Map{"A": "a"}) - if err != nil { - t.Errorf("%v", err) - } - exp2 := Map{ - "A": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - err = 
map1.Set("/", Map{"A": 1}) - if err != nil { - t.Errorf("%v", err) - } - exp3 := Map{ - "A": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Set("/A", "AAA") - if err != nil { - t.Errorf("%v", err) - } - exp4 := Map{ - "A": "AAA", - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) - } - - err = map1.Set("/B", "BBB") - if err != nil { - t.Errorf("%v", err) - } - exp5 := Map{ - "A": "AAA", - "B": "BBB", - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) - } - - err = map1.Set("/C", map[string]interface{}{"D": "CCC-DDD"}) - if err != nil { - t.Errorf("%v", err) - } - exp6 := Map{ - "A": "AAA", - "B": "BBB", - "C": Map{ - "D": "CCC-DDD", - }, - } - act6 := map1 - if !reflect.DeepEqual(exp6, act6) { - t.Errorf("expected content to see %v, saw %v", exp6, act6) - } -} - -func Test_Merge(t *testing.T) { - map1 := Map{} - - err := map1.Merge("/", Map{"a": "A"}) - if err != nil { - t.Errorf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Merge("/a", "a") - if err != nil { - t.Errorf("%v", err) - } - exp2 := Map{ - "a": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Merge("/", Map{"a": 1}) - if err != nil { - t.Errorf("%v", err) - } - exp3 := Map{ - "a": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) - } - - err = map1.Merge("/", Map{"b": 2}) - if err != nil { - t.Errorf("%v", err) - } - exp4 := Map{ - "a": 1, - "b": 2, - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) - } - - err = 
map1.Merge("/c", 3) - if err != nil { - t.Errorf("%v", err) - } - exp5 := Map{ - "a": 1, - "b": 2, - "c": 3, - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) - } - -} - -func Test_Get(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Get("a/b/c") - if err != nil { - t.Errorf("%v", err) - } - exp1 := "abc" - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a") - if err != nil { - t.Errorf("%v", err) - } - exp2 := Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Delete(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - err := map1.Delete("a/b/c") - if err != nil { - t.Errorf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) - } - -} - -//func Test_Get(t *testing.T) { -// data1 := objx.Map{ -// "a": objx.Map{ -// "b": objx.Map{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// }, -// } -// key1 := "/" -// val1, err := Get(data1, key1) -// if err != nil { -// t.Errorf("%v", err) -// } -// exp1 := map[string]interface{}{ -// "a": map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// }, -// } -// act1 := val1 -// if !reflect.DeepEqual(exp1, act1) { -// t.Errorf("expected content to see %v, saw 
%v", exp1, act1) -// } -// -// key2 := "/a" -// val2, err := Get(data1, key2) -// if err != nil { -// t.Errorf("%v", err) -// } -// exp2 := map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// } -// act2 := val2 -// if !reflect.DeepEqual(exp2, act2) { -// t.Errorf("expected content to see %v, saw %v", exp2, act2) -// } -//} - -//func Test_Set(t *testing.T) { -// data := map[string]interface{}{} -// -// data, err := Set(data, "/", map[string]interface{}{"a": 1}, true) -// if err != nil { -// t.Errorf("%v", err) -// } -// -// exp1 := 1 -// act1 := val1 -// if exp1 != act1 { -// t.Errorf("expected content to see %v, saw %v", exp1, act1) -// } -// -// fsm.applySet("/b/bb", map[string]interface{}{"b": 1}, false) -// -// val2, err := fsm.Get("/b") -// if err != nil { -// t.Errorf("%v", err) -// } -// -// exp2 := map[string]interface{}{"bb": map[string]interface{}{"b": 1}} -// act2 := val2.(map[string]interface{}) -// if !reflect.DeepEqual(exp2, act2) { -// t.Errorf("expected content to see %v, saw %v", exp2, act2) -// } -// -// fsm.applySet("/", map[string]interface{}{"a": 1}, false) -// -// val3, err := fsm.Get("/") -// if err != nil { -// t.Errorf("%v", err) -// } -// -// exp3 := map[string]interface{}{"a": 1} -// act3 := val3 -// if !reflect.DeepEqual(exp3, act3) { -// t.Errorf("expected content to see %v, saw %v", exp3, act3) -// } -// -// fsm.applySet("/", map[string]interface{}{"b": 2}, true) -// -// val4, err := fsm.Get("/") -// if err != nil { -// t.Errorf("%v", err) -// } -// -// exp4 := map[string]interface{}{"a": 1, "b": 2} -// act4 := val4 -// if !reflect.DeepEqual(exp4, act4) { -// t.Errorf("expected content to see %v, saw %v", exp4, act4) -// } -//} diff --git a/marshaler/marshaler.go b/marshaler/marshaler.go new file mode 100644 index 0000000..22c615c --- /dev/null +++ b/marshaler/marshaler.go @@ -0,0 +1,186 @@ +package marshaler + +import ( + "bufio" + 
"bytes" + "encoding/json" + "io" + "io/ioutil" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" +) + +var ( + DefaultContentType = "application/json" +) + +type BlastMarshaler struct{} + +func (*BlastMarshaler) ContentType() string { + return DefaultContentType +} + +func (m *BlastMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *protobuf.GetResponse: + var fields map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.GetResponse).Fields, &fields); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "fields": fields, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.SearchResponse: + var searchResult map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.SearchResponse).SearchResult, &searchResult); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "search_result": searchResult, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MappingResponse: + var m map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.MappingResponse).Mapping, &m); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "mapping": m, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MetricsResponse: + value := v.(*protobuf.MetricsResponse).Metrics + return value, nil + default: + return json.Marshal(v) + } +} + +func (m *BlastMarshaler) Unmarshal(data []byte, v interface{}) error { + switch v.(type) { + case *protobuf.SetRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + if i, ok := m["id"].(string); ok { + v.(*protobuf.SetRequest).Id = i + } + + if f, ok := m["fields"].(map[string]interface{}); ok { + fieldsBytes, 
err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SetRequest).Fields = fieldsBytes + } + return nil + case *protobuf.BulkIndexRequest: + v.(*protobuf.BulkIndexRequest).Requests = make([]*protobuf.SetRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + } + return nil + case *protobuf.BulkDeleteRequest: + v.(*protobuf.BulkDeleteRequest).Requests = make([]*protobuf.DeleteRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + } + return nil + case *protobuf.SearchRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + f, ok := m["search_request"] + if !ok { + return errors.ErrNil + } + searchRequestBytes, err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SearchRequest).SearchRequest = searchRequestBytes + return nil + default: + return 
json.Unmarshal(data, v) + } +} + +func (m *BlastMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + return m.Unmarshal(buffer, v) + }, + ) +} + +func (m *BlastMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +func (m *BlastMarshaler) Delimiter() []byte { + return []byte("\n") +} diff --git a/marshaler/util.go b/marshaler/util.go new file mode 100644 index 0000000..e935b8b --- /dev/null +++ b/marshaler/util.go @@ -0,0 +1,69 @@ +package marshaler + +import ( + "encoding/json" + "reflect" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/registry" +) + +func init() { + registry.RegisterType("protobuf.LivenessCheckResponse", reflect.TypeOf(protobuf.LivenessCheckResponse{})) + registry.RegisterType("protobuf.ReadinessCheckResponse", reflect.TypeOf(protobuf.ReadinessCheckResponse{})) + registry.RegisterType("protobuf.Metadata", reflect.TypeOf(protobuf.Metadata{})) + registry.RegisterType("protobuf.Node", reflect.TypeOf(protobuf.Node{})) + registry.RegisterType("protobuf.Cluster", reflect.TypeOf(protobuf.Cluster{})) + registry.RegisterType("protobuf.JoinRequest", reflect.TypeOf(protobuf.JoinRequest{})) + registry.RegisterType("protobuf.LeaveRequest", reflect.TypeOf(protobuf.LeaveRequest{})) + registry.RegisterType("protobuf.NodeResponse", reflect.TypeOf(protobuf.NodeResponse{})) + registry.RegisterType("protobuf.ClusterResponse", reflect.TypeOf(protobuf.ClusterResponse{})) + registry.RegisterType("protobuf.GetRequest", reflect.TypeOf(protobuf.GetRequest{})) + registry.RegisterType("protobuf.GetResponse", reflect.TypeOf(protobuf.GetResponse{})) + registry.RegisterType("protobuf.SetRequest", reflect.TypeOf(protobuf.SetRequest{})) + registry.RegisterType("protobuf.DeleteRequest", reflect.TypeOf(protobuf.DeleteRequest{})) + 
registry.RegisterType("protobuf.BulkIndexRequest", reflect.TypeOf(protobuf.BulkIndexRequest{})) + registry.RegisterType("protobuf.BulkDeleteRequest", reflect.TypeOf(protobuf.BulkDeleteRequest{})) + registry.RegisterType("protobuf.SetMetadataRequest", reflect.TypeOf(protobuf.SetMetadataRequest{})) + registry.RegisterType("protobuf.DeleteMetadataRequest", reflect.TypeOf(protobuf.DeleteMetadataRequest{})) + registry.RegisterType("protobuf.Event", reflect.TypeOf(protobuf.Event{})) + registry.RegisterType("protobuf.WatchResponse", reflect.TypeOf(protobuf.WatchResponse{})) + registry.RegisterType("protobuf.MetricsResponse", reflect.TypeOf(protobuf.MetricsResponse{})) + registry.RegisterType("protobuf.Document", reflect.TypeOf(protobuf.Document{})) + registry.RegisterType("map[string]interface {}", reflect.TypeOf((map[string]interface{})(nil))) +} + +func MarshalAny(message *any.Any) (interface{}, error) { + if message == nil { + return nil, nil + } + + typeUrl := message.TypeUrl + value := message.Value + + instance := registry.TypeInstanceByName(typeUrl) + + if err := json.Unmarshal(value, instance); err != nil { + return nil, err + } else { + return instance, nil + } + +} + +func UnmarshalAny(instance interface{}, message *any.Any) error { + if instance == nil { + return nil + } + + value, err := json.Marshal(instance) + if err != nil { + return err + } + + message.TypeUrl = registry.TypeNameByInstance(instance) + message.Value = value + + return nil +} diff --git a/marshaler/util_test.go b/marshaler/util_test.go new file mode 100644 index 0000000..da72cd4 --- /dev/null +++ b/marshaler/util_test.go @@ -0,0 +1,109 @@ +package marshaler + +import ( + "bytes" + "testing" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" +) + +func TestMarshalAny(t *testing.T) { + // test map[string]interface{} + data := map[string]interface{}{"a": 1, "b": 2, "c": 3} + + mapAny := &any.Any{} + err := UnmarshalAny(data, mapAny) + if err != nil { + 
t.Errorf("%v", err) + } + + expectedType := "map[string]interface {}" + actualType := mapAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue := []byte(`{"a":1,"b":2,"c":3}`) + actualValue := mapAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } + + // test kvs.Node + node := &protobuf.Node{ + RaftAddress: ":7000", + State: "Leader", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + nodeAny := &any.Any{} + err = UnmarshalAny(node, nodeAny) + if err != nil { + t.Errorf("%v", err) + } + + expectedType = "protobuf.Node" + actualType = nodeAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue = []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`) + actualValue = nodeAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } +} + +func TestUnmarshalAny(t *testing.T) { + // test map[string]interface{} + dataAny := &any.Any{ + TypeUrl: "map[string]interface {}", + Value: []byte(`{"a":1,"b":2,"c":3}`), + } + + data, err := MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + dataMap := *data.(*map[string]interface{}) + + if dataMap["a"] != float64(1) { + t.Errorf("expected content to see %v, saw %v", 1, dataMap["a"]) + } + if dataMap["b"] != float64(2) { + t.Errorf("expected content to see %v, saw %v", 2, dataMap["b"]) + } + if dataMap["c"] != float64(3) { + t.Errorf("expected content to see %v, saw %v", 3, dataMap["c"]) + } + + // raft.Node + dataAny = &any.Any{ + TypeUrl: "protobuf.Node", + Value: []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`), + } + + 
data, err = MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + node := data.(*protobuf.Node) + + if node.RaftAddress != ":7000" { + t.Errorf("expected content to see %v, saw %v", ":7000", node.RaftAddress) + } + if node.Metadata.GrpcAddress != ":9000" { + t.Errorf("expected content to see %v, saw %v", ":9000", node.Metadata.GrpcAddress) + } + if node.Metadata.HttpAddress != ":8000" { + t.Errorf("expected content to see %v, saw %v", ":8000", node.Metadata.HttpAddress) + } + if node.State != "Leader" { + t.Errorf("expected content to see %v, saw %v", "Leader", node.State) + } +} diff --git a/metric/metric.go b/metric/metric.go new file mode 100644 index 0000000..9e6ba20 --- /dev/null +++ b/metric/metric.go @@ -0,0 +1,895 @@ +package metric + +import ( + grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // Create a metrics registry. + Registry = prometheus.NewRegistry() + + // Create some standard server metrics. + GrpcMetrics = grpcprometheus.NewServerMetrics( + func(o *prometheus.CounterOpts) { + o.Namespace = "blast" + }, + ) + + // Raft node state metric + RaftStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "state", + Help: "Node state. 
0:Follower, 1:Candidate, 2:Leader, 3:Shutdown", + }, []string{"id"}) + + RaftTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "term", + Help: "Term.", + }, []string{"id"}) + + RaftLastLogIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_index", + Help: "Last log index.", + }, []string{"id"}) + + RaftLastLogTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_term", + Help: "Last log term.", + }, []string{"id"}) + + RaftCommitIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "commit_index", + Help: "Commit index.", + }, []string{"id"}) + + RaftAppliedIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "applied_index", + Help: "Applied index.", + }, []string{"id"}) + + RaftFsmPendingMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "fsm_pending", + Help: "FSM pending.", + }, []string{"id"}) + + RaftLastSnapshotIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_index", + Help: "Last snapshot index.", + }, []string{"id"}) + + RaftLastSnapshotTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_term", + Help: "Last snapshot term.", + }, []string{"id"}) + + RaftLatestConfigurationIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "latest_configuration_index", + Help: "Latest configuration index.", + }, []string{"id"}) + + RaftNumPeersMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_peers", + Help: "Number of peers.", + }, []string{"id"}) + + 
RaftLastContactMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_contact", + Help: "Last contact.", + }, []string{"id"}) + + RaftNumNodesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_nodes", + Help: "Number of nodes.", + }, []string{"id"}) + + IndexCurOnDiskBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_bytes", + Help: "cur_on_disk_bytes", + }, []string{"id"}) + + IndexCurOnDiskFilesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_files", + Help: "cur_on_disk_files", + }, []string{"id"}) + + IndexCurRootEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_root_epoch", + Help: "cur_root_epoch", + }, []string{"id"}) + + IndexLastMergedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_merged_epoch", + Help: "last_merged_epoch", + }, []string{"id"}) + + IndexLastPersistedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_persisted_epoch", + Help: "last_persisted_epoch", + }, []string{"id"}) + + IndexMaxBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_batch_intro_time", + Help: "max_batch_intro_time", + }, []string{"id"}) + + IndexMaxFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_file_merge_zap_time", + Help: "max_file_merge_zap_time", + }, []string{"id"}) + + IndexMaxMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_mem_merge_zap_time", + Help: "max_mem_merge_zap_time", + }, 
[]string{"id"}) + + IndexTotAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_analysis_time", + Help: "tot_analysis_time", + }, []string{"id"}) + + IndexTotBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batch_intro_time", + Help: "tot_batch_intro_time", + }, []string{"id"}) + + IndexTotBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches", + Help: "tot_batches", + }, []string{"id"}) + + IndexTotBatchesEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches_empty", + Help: "tot_batches_empty", + }, []string{"id"}) + + IndexTotDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_deletes", + Help: "tot_deletes", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions", + Help: "tot_file_merge_introductions", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_done", + Help: "tot_file_merge_introductions_done", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsSkippedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_skipped", + Help: "tot_file_merge_introductions_skipped", + }, []string{"id"}) + + IndexTotFileMergeLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_beg", + Help: "tot_file_merge_loop_beg", + }, []string{"id"}) + + IndexTotFileMergeLoopEndMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_end", + Help: "tot_file_merge_loop_end", + }, []string{"id"}) + + IndexTotFileMergeLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_err", + Help: "tot_file_merge_loop_err", + }, []string{"id"}) + + IndexTotFileMergePlanMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan", + Help: "tot_file_merge_plan", + }, []string{"id"}) + + IndexTotFileMergePlanErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_err", + Help: "tot_file_merge_plan_err", + }, []string{"id"}) + + IndexTotFileMergePlanNoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_none", + Help: "tot_file_merge_plan_none", + }, []string{"id"}) + + IndexTotFileMergePlanOkMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_ok", + Help: "tot_file_merge_plan_ok", + }, []string{"id"}) + + IndexTotFileMergePlanTasksMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks", + Help: "tot_file_merge_plan_tasks", + }, []string{"id"}) + + IndexTotFileMergePlanTasksDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_done", + Help: "tot_file_merge_plan_tasks_done", + }, []string{"id"}) + + IndexTotFileMergePlanTasksErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_err", + Help: "tot_file_merge_plan_tasks_err", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments", + Help: "tot_file_merge_plan_tasks_segments", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments_empty", + Help: "tot_file_merge_plan_tasks_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments", + Help: "tot_file_merge_segments", + }, []string{"id"}) + + IndexTotFileMergeSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments_empty", + Help: "tot_file_merge_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_written_bytes", + Help: "tot_file_merge_written_bytes", + }, []string{"id"}) + + IndexTotFileMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_beg", + Help: "tot_file_merge_zap_beg", + }, []string{"id"}) + + IndexTotFileMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_end", + Help: "tot_file_merge_zap_end", + }, []string{"id"}) + + IndexTotFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_time", + Help: "tot_file_merge_zap_time", + }, []string{"id"}) + + IndexTotFileSegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_segments_at_root", + Help: "tot_file_segments_at_root", 
+ }, []string{"id"}) + + IndexTotIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_index_time", + Help: "tot_index_time", + }, []string{"id"}) + + IndexTotIndexedPlainTextBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_indexed_plain_text_bytes", + Help: "tot_indexed_plain_text_bytes", + }, []string{"id"}) + + IndexTotIntroduceLoopMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_loop", + Help: "tot_introduce_loop", + }, []string{"id"}) + + IndexTotIntroduceMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_beg", + Help: "tot_introduce_merge_beg", + }, []string{"id"}) + + IndexTotIntroduceMergeEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_end", + Help: "tot_introduce_merge_end", + }, []string{"id"}) + + IndexTotIntroducePersistBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_beg", + Help: "tot_introduce_persist_beg", + }, []string{"id"}) + + IndexTotIntroducePersistEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_end", + Help: "tot_introduce_persist_end", + }, []string{"id"}) + + IndexTotIntroduceRevertBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_beg", + Help: "tot_introduce_revert_beg", + }, []string{"id"}) + + IndexTotIntroduceRevertEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_end", + Help: "tot_introduce_revert_end", + }, []string{"id"}) + + 
IndexTotIntroduceSegmentBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_beg", + Help: "tot_introduce_segment_beg", + }, []string{"id"}) + + IndexTotIntroduceSegmentEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_end", + Help: "tot_introduce_segment_end", + }, []string{"id"}) + + IndexTotIntroducedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_items", + Help: "tot_introduced_items", + }, []string{"id"}) + + IndexTotIntroducedSegmentsBatchMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_batch", + Help: "tot_introduced_segments_batch", + }, []string{"id"}) + + IndexTotIntroducedSegmentsMergeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_merge", + Help: "tot_introduced_segments_merge", + }, []string{"id"}) + + IndexTotItemsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_items_to_persist", + Help: "tot_items_to_persist", + }, []string{"id"}) + + IndexTotMemMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_beg", + Help: "tot_mem_merge_beg", + }, []string{"id"}) + + IndexTotMemMergeDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_done", + Help: "tot_mem_merge_done", + }, []string{"id"}) + + IndexTotMemMergeErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_err", + Help: "tot_mem_merge_err", + }, []string{"id"}) + + IndexTotMemMergeSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_segments", + Help: "tot_mem_merge_segments", + }, []string{"id"}) + + IndexTotMemMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_beg", + Help: "tot_mem_merge_zap_beg", + }, []string{"id"}) + + IndexTotMemMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_end", + Help: "tot_mem_merge_zap_end", + }, []string{"id"}) + + IndexTotMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_time", + Help: "tot_mem_merge_zap_time", + }, []string{"id"}) + + IndexTotMemorySegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_memory_segments_at_root", + Help: "tot_memory_segments_at_root", + }, []string{"id"}) + + IndexTotOnErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_on_errors", + Help: "tot_on_errors", + }, []string{"id"}) + + IndexTotPersistLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_beg", + Help: "tot_persist_loop_beg", + }, []string{"id"}) + + IndexTotPersistLoopEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_end", + Help: "tot_persist_loop_end", + }, []string{"id"}) + + IndexTotPersistLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_err", + Help: "tot_persist_loop_err", + }, []string{"id"}) + + IndexTotPersistLoopProgressMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: 
"tot_persist_loop_progress", + Help: "tot_persist_loop_progress", + }, []string{"id"}) + + IndexTotPersistLoopWaitMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait", + Help: "tot_persist_loop_wait", + }, []string{"id"}) + + IndexTotPersistLoopWaitNotifiedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait_notified", + Help: "tot_persist_loop_wait_notified", + }, []string{"id"}) + + IndexTotPersistedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_items", + Help: "tot_persisted_items", + }, []string{"id"}) + + IndexTotPersistedSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_segments", + Help: "tot_persisted_segments", + }, []string{"id"}) + + IndexTotPersisterMergerNapBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_merger_nap_break", + Help: "tot_persister_merger_nap_break", + }, []string{"id"}) + + IndexTotPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_nap_pause_completed", + Help: "tot_persister_nap_pause_completed", + }, []string{"id"}) + + IndexTotPersisterSlowMergerPauseMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_pause", + Help: "tot_persister_slow_merger_pause", + }, []string{"id"}) + + IndexTotPersisterSlowMergerResumeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_resume", + Help: "tot_persister_slow_merger_resume", + }, []string{"id"}) + + IndexTotTermSearchersFinishedMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_finished", + Help: "tot_term_searchers_finished", + }, []string{"id"}) + + IndexTotTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_started", + Help: "tot_term_searchers_started", + }, []string{"id"}) + + IndexTotUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_updates", + Help: "tot_updates", + }, []string{"id"}) + + IndexAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "analysis_time", + Help: "analysis_time", + }, []string{"id"}) + + IndexBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "batches", + Help: "batches", + }, []string{"id"}) + + IndexDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "deletes", + Help: "deletes", + }, []string{"id"}) + + IndexErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "errors", + Help: "errors", + }, []string{"id"}) + + IndexIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "index_time", + Help: "index_time", + }, []string{"id"}) + + IndexNumBytesUsedDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_bytes_used_disk", + Help: "num_bytes_used_disk", + }, []string{"id"}) + + IndexNumFilesOnDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_files_on_disk", + Help: "num_files_on_disk", + }, []string{"id"}) + + IndexNumItemsIntroducedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "num_items_introduced", + Help: "num_items_introduced", + }, []string{"id"}) + + IndexNumItemsPersistedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_items_persisted", + Help: "num_items_persisted", + }, []string{"id"}) + + IndexNumPersisterNapMergerBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_merger_break", + Help: "num_persister_nap_merger_break", + }, []string{"id"}) + + IndexNumPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_pause_completed", + Help: "num_persister_nap_pause_completed", + }, []string{"id"}) + + IndexNumPlainTextBytesIndexedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_plain_text_bytes_indexed", + Help: "num_plain_text_bytes_indexed", + }, []string{"id"}) + + IndexNumRecsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_recs_to_persist", + Help: "num_recs_to_persist", + }, []string{"id"}) + + IndexNumRootFilesegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_filesegments", + Help: "num_root_filesegments", + }, []string{"id"}) + + IndexNumRootMemorysegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_memorysegments", + Help: "num_root_memorysegments", + }, []string{"id"}) + + IndexTermSearchersFinishedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "term_searchers_finished", + Help: "term_searchers_finished", + }, []string{"id"}) + + IndexTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "term_searchers_started", + Help: "term_searchers_started", + }, []string{"id"}) + + IndexTotalCompactionWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "total_compaction_written_bytes", + Help: "total_compaction_written_bytes", + }, []string{"id"}) + + IndexUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "updates", + Help: "updates", + }, []string{"id"}) + + SearchTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "search_time", + Help: "search_time", + }, []string{"id"}) + + SearchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "searches", + Help: "searches", + }, []string{"id"}) +) + +func init() { + // Register standard server metrics and customized metrics to registry. + Registry.MustRegister( + GrpcMetrics, + RaftStateMetric, + RaftTermMetric, + RaftLastLogIndexMetric, + RaftLastLogTermMetric, + RaftCommitIndexMetric, + RaftAppliedIndexMetric, + RaftFsmPendingMetric, + RaftLastSnapshotIndexMetric, + RaftLastSnapshotTermMetric, + RaftLatestConfigurationIndexMetric, + RaftNumPeersMetric, + RaftLastContactMetric, + RaftNumNodesMetric, + IndexCurOnDiskBytesMetric, + IndexCurOnDiskFilesMetric, + IndexCurRootEpochMetric, + IndexLastMergedEpochMetric, + IndexLastPersistedEpochMetric, + IndexMaxBatchIntroTimeMetric, + IndexMaxFileMergeZapTimeMetric, + IndexMaxMemMergeZapTimeMetric, + IndexTotAnalysisTimeMetric, + IndexTotBatchIntroTimeMetric, + IndexTotBatchesMetric, + IndexTotBatchesEmptyMetric, + IndexTotDeletesMetric, + IndexTotFileMergeIntroductionsMetric, + IndexTotFileMergeIntroductionsDoneMetric, + IndexTotFileMergeIntroductionsSkippedMetric, + IndexTotFileMergeLoopBegMetric, + IndexTotFileMergeLoopEndMetric, + IndexTotFileMergeLoopErrMetric, + IndexTotFileMergePlanMetric, + 
IndexTotFileMergePlanErrMetric, + IndexTotFileMergePlanNoneMetric, + IndexTotFileMergePlanOkMetric, + IndexTotFileMergePlanTasksMetric, + IndexTotFileMergePlanTasksDoneMetric, + IndexTotFileMergePlanTasksErrMetric, + IndexTotFileMergePlanTasksSegmentsMetric, + IndexTotFileMergePlanTasksSegmentsEmptyMetric, + IndexTotFileMergeSegmentsMetric, + IndexTotFileMergeSegmentsEmptyMetric, + IndexTotFileMergeWrittenBytesMetric, + IndexTotFileMergeZapBegMetric, + IndexTotFileMergeZapEndMetric, + IndexTotFileMergeZapTimeMetric, + IndexTotFileSegmentsAtRootMetric, + IndexTotIndexTimeMetric, + IndexTotIndexedPlainTextBytesMetric, + IndexTotIntroduceLoopMetric, + IndexTotIntroduceMergeBegMetric, + IndexTotIntroduceMergeEndMetric, + IndexTotIntroducePersistBegMetric, + IndexTotIntroducePersistEndMetric, + IndexTotIntroduceRevertBegMetric, + IndexTotIntroduceRevertEndMetric, + IndexTotIntroduceSegmentBegMetric, + IndexTotIntroduceSegmentEndMetric, + IndexTotIntroducedItemsMetric, + IndexTotIntroducedSegmentsBatchMetric, + IndexTotIntroducedSegmentsMergeMetric, + IndexTotItemsToPersistMetric, + IndexTotMemMergeBegMetric, + IndexTotMemMergeDoneMetric, + IndexTotMemMergeErrMetric, + IndexTotMemMergeSegmentsMetric, + IndexTotMemMergeZapBegMetric, + IndexTotMemMergeZapEndMetric, + IndexTotMemMergeZapTimeMetric, + IndexTotMemorySegmentsAtRootMetric, + IndexTotOnErrorsMetric, + IndexTotPersistLoopBegMetric, + IndexTotPersistLoopEndMetric, + IndexTotPersistLoopErrMetric, + IndexTotPersistLoopProgressMetric, + IndexTotPersistLoopWaitMetric, + IndexTotPersistLoopWaitNotifiedMetric, + IndexTotPersistedItemsMetric, + IndexTotPersistedSegmentsMetric, + IndexTotPersisterMergerNapBreakMetric, + IndexTotPersisterNapPauseCompletedMetric, + IndexTotPersisterSlowMergerPauseMetric, + IndexTotPersisterSlowMergerResumeMetric, + IndexTotTermSearchersFinishedMetric, + IndexTotTermSearchersStartedMetric, + IndexTotUpdatesMetric, + IndexAnalysisTimeMetric, + IndexBatchesMetric, + IndexDeletesMetric, + 
IndexErrorsMetric, + IndexIndexTimeMetric, + IndexNumBytesUsedDiskMetric, + IndexNumFilesOnDiskMetric, + IndexNumItemsIntroducedMetric, + IndexNumItemsPersistedMetric, + IndexNumPersisterNapMergerBreakMetric, + IndexNumPersisterNapPauseCompletedMetric, + IndexNumPlainTextBytesIndexedMetric, + IndexNumRecsToPersistMetric, + IndexNumRootFilesegmentsMetric, + IndexNumRootMemorysegmentsMetric, + IndexTermSearchersFinishedMetric, + IndexTermSearchersStartedMetric, + IndexTotalCompactionWrittenBytesMetric, + IndexUpdatesMetric, + SearchTimeMetric, + SearchesMetric, + ) + GrpcMetrics.EnableHandlingTimeHistogram( + func(o *prometheus.HistogramOpts) { + o.Namespace = "blast" + }, + ) +} diff --git a/protobuf/blast.pb.go b/protobuf/blast.pb.go deleted file mode 100644 index 035376f..0000000 --- a/protobuf/blast.pb.go +++ /dev/null @@ -1,1960 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/blast.proto - -package protobuf - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LivenessProbeResponse_State int32 - -const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 -) - -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", -} - -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, -} - -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) -} - -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{0, 0} -} - -type ReadinessProbeResponse_State int32 - -const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 -) - -var ReadinessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", -} - -var ReadinessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, -} - -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) -} - -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{1, 0} -} - -type WatchStoreResponse_Command int32 - -const ( - WatchStoreResponse_UNKNOWN WatchStoreResponse_Command = 0 - WatchStoreResponse_SET WatchStoreResponse_Command = 1 - WatchStoreResponse_DELETE WatchStoreResponse_Command = 2 -) - -var WatchStoreResponse_Command_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET", - 2: "DELETE", -} - -var WatchStoreResponse_Command_value = map[string]int32{ - "UNKNOWN": 0, - "SET": 1, - "DELETE": 2, -} - -func (x 
WatchStoreResponse_Command) String() string { - return proto.EnumName(WatchStoreResponse_Command_name, int32(x)) -} - -func (WatchStoreResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{12, 0} -} - -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{0} -} - -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) -} -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) -} -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) -} -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) -} -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo - -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { - if m != nil { - return m.State - } - return LivenessProbeResponse_UNKNOWN -} - -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.ReadinessProbeResponse_State" json:"state,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{1} -} - -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) -} -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) -} -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) -} -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) -} -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo - -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { - if m != nil { - return m.State - } - return ReadinessProbeResponse_UNKNOWN -} - -// use for raft cluster status -type GetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } -func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*GetNodeRequest) ProtoMessage() {} -func (*GetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{2} -} - -func (m *GetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeRequest.Unmarshal(m, b) 
-} -func (m *GetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeRequest.Marshal(b, m, deterministic) -} -func (m *GetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeRequest.Merge(m, src) -} -func (m *GetNodeRequest) XXX_Size() int { - return xxx_messageInfo_GetNodeRequest.Size(m) -} -func (m *GetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNodeRequest proto.InternalMessageInfo - -func (m *GetNodeRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -// use for raft cluster status -type GetNodeResponse struct { - NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } -func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } -func (*GetNodeResponse) ProtoMessage() {} -func (*GetNodeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{3} -} - -func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeResponse.Unmarshal(m, b) -} -func (m *GetNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeResponse.Marshal(b, m, deterministic) -} -func (m *GetNodeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeResponse.Merge(m, src) -} -func (m *GetNodeResponse) XXX_Size() int { - return xxx_messageInfo_GetNodeResponse.Size(m) -} -func (m *GetNodeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNodeResponse proto.InternalMessageInfo - -func (m *GetNodeResponse) GetNodeConfig() *any.Any { - if m 
!= nil { - return m.NodeConfig - } - return nil -} - -func (m *GetNodeResponse) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// use for raft cluster status -type SetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - NodeConfig *any.Any `protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } -func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*SetNodeRequest) ProtoMessage() {} -func (*SetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{4} -} - -func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNodeRequest.Unmarshal(m, b) -} -func (m *SetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNodeRequest.Marshal(b, m, deterministic) -} -func (m *SetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNodeRequest.Merge(m, src) -} -func (m *SetNodeRequest) XXX_Size() int { - return xxx_messageInfo_SetNodeRequest.Size(m) -} -func (m *SetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNodeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNodeRequest proto.InternalMessageInfo - -func (m *SetNodeRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *SetNodeRequest) GetNodeConfig() *any.Any { - if m != nil { - return m.NodeConfig - } - return nil -} - -// use for raft cluster status -type DeleteNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } -func (m *DeleteNodeRequest) 
String() string { return proto.CompactTextString(m) } -func (*DeleteNodeRequest) ProtoMessage() {} -func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{5} -} - -func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteNodeRequest.Unmarshal(m, b) -} -func (m *DeleteNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteNodeRequest.Marshal(b, m, deterministic) -} -func (m *DeleteNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNodeRequest.Merge(m, src) -} -func (m *DeleteNodeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteNodeRequest.Size(m) -} -func (m *DeleteNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNodeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteNodeRequest proto.InternalMessageInfo - -func (m *DeleteNodeRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -// use for raft cluster status -type GetClusterResponse struct { - Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } -func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } -func (*GetClusterResponse) ProtoMessage() {} -func (*GetClusterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{6} -} - -func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterResponse.Unmarshal(m, b) -} -func (m *GetClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterResponse.Marshal(b, m, deterministic) -} -func (m *GetClusterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetClusterResponse.Merge(m, src) -} -func (m 
*GetClusterResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterResponse.Size(m) -} -func (m *GetClusterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetClusterResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetClusterResponse proto.InternalMessageInfo - -func (m *GetClusterResponse) GetCluster() *any.Any { - if m != nil { - return m.Cluster - } - return nil -} - -type GetValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetValueRequest) Reset() { *m = GetValueRequest{} } -func (m *GetValueRequest) String() string { return proto.CompactTextString(m) } -func (*GetValueRequest) ProtoMessage() {} -func (*GetValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{7} -} - -func (m *GetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetValueRequest.Unmarshal(m, b) -} -func (m *GetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueRequest.Marshal(b, m, deterministic) -} -func (m *GetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueRequest.Merge(m, src) -} -func (m *GetValueRequest) XXX_Size() int { - return xxx_messageInfo_GetValueRequest.Size(m) -} -func (m *GetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetValueRequest proto.InternalMessageInfo - -func (m *GetValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type GetValueResponse struct { - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetValueResponse) Reset() { *m = GetValueResponse{} } -func (m *GetValueResponse) String() 
string { return proto.CompactTextString(m) } -func (*GetValueResponse) ProtoMessage() {} -func (*GetValueResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{8} -} - -func (m *GetValueResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetValueResponse.Unmarshal(m, b) -} -func (m *GetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueResponse.Marshal(b, m, deterministic) -} -func (m *GetValueResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueResponse.Merge(m, src) -} -func (m *GetValueResponse) XXX_Size() int { - return xxx_messageInfo_GetValueResponse.Size(m) -} -func (m *GetValueResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetValueResponse proto.InternalMessageInfo - -func (m *GetValueResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type SetValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetValueRequest) Reset() { *m = SetValueRequest{} } -func (m *SetValueRequest) String() string { return proto.CompactTextString(m) } -func (*SetValueRequest) ProtoMessage() {} -func (*SetValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{9} -} - -func (m *SetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetValueRequest.Unmarshal(m, b) -} -func (m *SetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetValueRequest.Marshal(b, m, deterministic) -} -func (m *SetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetValueRequest.Merge(m, src) -} -func (m *SetValueRequest) 
XXX_Size() int { - return xxx_messageInfo_SetValueRequest.Size(m) -} -func (m *SetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetValueRequest proto.InternalMessageInfo - -func (m *SetValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *SetValueRequest) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type DeleteValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteValueRequest) Reset() { *m = DeleteValueRequest{} } -func (m *DeleteValueRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteValueRequest) ProtoMessage() {} -func (*DeleteValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{10} -} - -func (m *DeleteValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteValueRequest.Unmarshal(m, b) -} -func (m *DeleteValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteValueRequest.Marshal(b, m, deterministic) -} -func (m *DeleteValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteValueRequest.Merge(m, src) -} -func (m *DeleteValueRequest) XXX_Size() int { - return xxx_messageInfo_DeleteValueRequest.Size(m) -} -func (m *DeleteValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteValueRequest proto.InternalMessageInfo - -func (m *DeleteValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchStoreRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - 
-func (m *WatchStoreRequest) Reset() { *m = WatchStoreRequest{} } -func (m *WatchStoreRequest) String() string { return proto.CompactTextString(m) } -func (*WatchStoreRequest) ProtoMessage() {} -func (*WatchStoreRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{11} -} - -func (m *WatchStoreRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreRequest.Unmarshal(m, b) -} -func (m *WatchStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreRequest.Marshal(b, m, deterministic) -} -func (m *WatchStoreRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreRequest.Merge(m, src) -} -func (m *WatchStoreRequest) XXX_Size() int { - return xxx_messageInfo_WatchStoreRequest.Size(m) -} -func (m *WatchStoreRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchStoreRequest proto.InternalMessageInfo - -func (m *WatchStoreRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchStoreResponse struct { - Command WatchStoreResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=protobuf.WatchStoreResponse_Command" json:"command,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchStoreResponse) Reset() { *m = WatchStoreResponse{} } -func (m *WatchStoreResponse) String() string { return proto.CompactTextString(m) } -func (*WatchStoreResponse) ProtoMessage() {} -func (*WatchStoreResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{12} -} - -func (m *WatchStoreResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreResponse.Unmarshal(m, b) -} -func (m 
*WatchStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreResponse.Marshal(b, m, deterministic) -} -func (m *WatchStoreResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreResponse.Merge(m, src) -} -func (m *WatchStoreResponse) XXX_Size() int { - return xxx_messageInfo_WatchStoreResponse.Size(m) -} -func (m *WatchStoreResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchStoreResponse proto.InternalMessageInfo - -func (m *WatchStoreResponse) GetCommand() WatchStoreResponse_Command { - if m != nil { - return m.Command - } - return WatchStoreResponse_UNKNOWN -} - -func (m *WatchStoreResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *WatchStoreResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type GetDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } -func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocumentRequest) ProtoMessage() {} -func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{13} -} - -func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) -} -func (m *GetDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) -} -func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentRequest.Merge(m, src) -} -func (m *GetDocumentRequest) XXX_Size() int { - return xxx_messageInfo_GetDocumentRequest.Size(m) -} -func (m *GetDocumentRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo - -func (m *GetDocumentRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type GetDocumentResponse struct { - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } -func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*GetDocumentResponse) ProtoMessage() {} -func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{14} -} - -func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) -} -func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) -} -func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentResponse.Merge(m, src) -} -func (m *GetDocumentResponse) XXX_Size() int { - return xxx_messageInfo_GetDocumentResponse.Size(m) -} -func (m *GetDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo - -func (m *GetDocumentResponse) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} 
} -func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentRequest) ProtoMessage() {} -func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{15} -} - -func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) -} -func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) -} -func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentRequest.Merge(m, src) -} -func (m *IndexDocumentRequest) XXX_Size() int { - return xxx_messageInfo_IndexDocumentRequest.Size(m) -} -func (m *IndexDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo - -func (m *IndexDocumentRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexDocumentRequest) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexDocumentResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } -func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentResponse) ProtoMessage() {} -func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{16} -} - -func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) -} -func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) -} -func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentResponse.Merge(m, src) -} -func (m *IndexDocumentResponse) XXX_Size() int { - return xxx_messageInfo_IndexDocumentResponse.Size(m) -} -func (m *IndexDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexDocumentResponse proto.InternalMessageInfo - -func (m *IndexDocumentResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type DeleteDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } -func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentRequest) ProtoMessage() {} -func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{17} -} - -func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) -} -func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) -} -func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) -} -func (m *DeleteDocumentRequest) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentRequest.Size(m) -} -func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteDocumentRequest proto.InternalMessageInfo - -func (m *DeleteDocumentRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type 
DeleteDocumentResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } -func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentResponse) ProtoMessage() {} -func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{18} -} - -func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) -} -func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) -} -func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) -} -func (m *DeleteDocumentResponse) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentResponse.Size(m) -} -func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteDocumentResponse proto.InternalMessageInfo - -func (m *DeleteDocumentResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type SearchRequest struct { - SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } -func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{19} -} - -func (m *SearchRequest) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) -} -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) -} -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo - -func (m *SearchRequest) GetSearchRequest() *any.Any { - if m != nil { - return m.SearchRequest - } - return nil -} - -type SearchResponse struct { - SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{20} -} - -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) -} -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) -} -func (m *SearchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SearchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo - -func (m *SearchResponse) GetSearchResult() *any.Any { - if m 
!= nil { - return m.SearchResult - } - return nil -} - -type GetIndexConfigResponse struct { - IndexConfig *any.Any `protobuf:"bytes,1,opt,name=index_config,json=indexConfig,proto3" json:"index_config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} } -func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexConfigResponse) ProtoMessage() {} -func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{21} -} - -func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexConfigResponse.Unmarshal(m, b) -} -func (m *GetIndexConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexConfigResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexConfigResponse.Merge(m, src) -} -func (m *GetIndexConfigResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexConfigResponse.Size(m) -} -func (m *GetIndexConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexConfigResponse proto.InternalMessageInfo - -func (m *GetIndexConfigResponse) GetIndexConfig() *any.Any { - if m != nil { - return m.IndexConfig - } - return nil -} - -type GetIndexStatsResponse struct { - IndexStats *any.Any `protobuf:"bytes,1,opt,name=index_stats,json=indexStats,proto3" json:"index_stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } -func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexStatsResponse) ProtoMessage() 
{} -func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{22} -} - -func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexStatsResponse.Unmarshal(m, b) -} -func (m *GetIndexStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexStatsResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexStatsResponse.Merge(m, src) -} -func (m *GetIndexStatsResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexStatsResponse.Size(m) -} -func (m *GetIndexStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexStatsResponse proto.InternalMessageInfo - -func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { - if m != nil { - return m.IndexStats - } - return nil -} - -// use for creating snapshot -type Document struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{23} -} - -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) -} -func (m 
*Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} - -var xxx_messageInfo_Document proto.InternalMessageInfo - -func (m *Document) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Document) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -func init() { - proto.RegisterEnum("protobuf.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("protobuf.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterEnum("protobuf.WatchStoreResponse_Command", WatchStoreResponse_Command_name, WatchStoreResponse_Command_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "protobuf.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "protobuf.ReadinessProbeResponse") - proto.RegisterType((*GetNodeRequest)(nil), "protobuf.GetNodeRequest") - proto.RegisterType((*GetNodeResponse)(nil), "protobuf.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "protobuf.SetNodeRequest") - proto.RegisterType((*DeleteNodeRequest)(nil), "protobuf.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "protobuf.GetClusterResponse") - proto.RegisterType((*GetValueRequest)(nil), "protobuf.GetValueRequest") - proto.RegisterType((*GetValueResponse)(nil), "protobuf.GetValueResponse") - proto.RegisterType((*SetValueRequest)(nil), "protobuf.SetValueRequest") - proto.RegisterType((*DeleteValueRequest)(nil), "protobuf.DeleteValueRequest") - proto.RegisterType((*WatchStoreRequest)(nil), "protobuf.WatchStoreRequest") - proto.RegisterType((*WatchStoreResponse)(nil), "protobuf.WatchStoreResponse") - proto.RegisterType((*GetDocumentRequest)(nil), "protobuf.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "protobuf.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "protobuf.IndexDocumentRequest") - 
proto.RegisterType((*IndexDocumentResponse)(nil), "protobuf.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "protobuf.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "protobuf.DeleteDocumentResponse") - proto.RegisterType((*SearchRequest)(nil), "protobuf.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "protobuf.SearchResponse") - proto.RegisterType((*GetIndexConfigResponse)(nil), "protobuf.GetIndexConfigResponse") - proto.RegisterType((*GetIndexStatsResponse)(nil), "protobuf.GetIndexStatsResponse") - proto.RegisterType((*Document)(nil), "protobuf.Document") -} - -func init() { proto.RegisterFile("protobuf/blast.proto", fileDescriptor_406ca165ef12c7d5) } - -var fileDescriptor_406ca165ef12c7d5 = []byte{ - // 939 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xeb, 0x6e, 0xe3, 0x44, - 0x18, 0x8d, 0x5d, 0xd2, 0xa4, 0x27, 0x4d, 0x36, 0x3b, 0xa4, 0xa5, 0xeb, 0x5d, 0xd8, 0x68, 0xf6, - 0xc2, 0x72, 0x73, 0x51, 0x01, 0x21, 0xb4, 0x17, 0xe8, 0x26, 0x56, 0x77, 0x69, 0x94, 0x82, 0x1d, - 0x5a, 0x81, 0x84, 0x2a, 0x27, 0x99, 0xb6, 0xd6, 0x26, 0x76, 0x89, 0xc7, 0x2b, 0xfa, 0x8b, 0x7f, - 0xbc, 0x09, 0xcf, 0xc1, 0xab, 0xad, 0x7c, 0xbf, 0xc4, 0xb1, 0x23, 0xed, 0xbf, 0xcc, 0xcc, 0xf9, - 0xce, 0x77, 0x99, 0xc9, 0x39, 0x32, 0x3a, 0xd7, 0x0b, 0x8b, 0x5b, 0x63, 0xe7, 0x62, 0x7f, 0x3c, - 0xd3, 0x6d, 0x2e, 0x7b, 0x4b, 0x52, 0x0f, 0x77, 0xa5, 0x3b, 0x97, 0x96, 0x75, 0x39, 0x63, 0xfb, - 0x11, 0x4c, 0x37, 0x6f, 0x7c, 0x90, 0x74, 0x37, 0x7b, 0xc4, 0xe6, 0xd7, 0x3c, 0x38, 0xa4, 0xff, - 0x60, 0x67, 0x60, 0xbc, 0x65, 0x26, 0xb3, 0xed, 0x5f, 0x16, 0xd6, 0x98, 0xa9, 0xcc, 0xbe, 0xb6, - 0x4c, 0x9b, 0x91, 0xa7, 0xa8, 0xda, 0x5c, 0xe7, 0x6c, 0x4f, 0xe8, 0x0a, 0x4f, 0x5a, 0x07, 0x8f, - 0xe4, 0x30, 0x5c, 0xce, 0xc5, 0xcb, 0x9a, 0x0b, 0x56, 0xfd, 0x18, 0xfa, 0x19, 0xaa, 0xde, 0x9a, - 0x34, 0x50, 0xfb, 0x6d, 0x78, 0x3c, 0x3c, 0x39, 0x1b, 0xb6, 0x2b, 0x64, 0x0b, 0xd5, 0xc3, 
0xc1, - 0xeb, 0x53, 0xa5, 0x2d, 0x90, 0x3a, 0x3e, 0xe8, 0x2b, 0x87, 0xfd, 0xb6, 0x48, 0xff, 0x15, 0xb0, - 0xab, 0x32, 0x7d, 0x6a, 0x2c, 0x97, 0xf0, 0x2c, 0x5d, 0xc2, 0xe3, 0xb8, 0x84, 0xfc, 0x80, 0x74, - 0x0d, 0xf2, 0xaa, 0x1a, 0x54, 0xe5, 0xb0, 0xff, 0x7b, 0x5b, 0x20, 0x4d, 0x6c, 0x0d, 0x4f, 0x46, - 0xe7, 0xfe, 0x52, 0xa4, 0x5d, 0xb4, 0x8e, 0x18, 0x1f, 0x5a, 0x53, 0xa6, 0xb2, 0xbf, 0x1c, 0x66, - 0x73, 0xd2, 0x82, 0x68, 0x4c, 0xbd, 0xe4, 0x5b, 0xaa, 0x68, 0x4c, 0xe9, 0x9f, 0xb8, 0x15, 0x21, - 0x82, 0x12, 0xbf, 0x05, 0x4c, 0x6b, 0xca, 0x7a, 0x96, 0x79, 0x61, 0x5c, 0x7a, 0xd0, 0xc6, 0x41, - 0x47, 0xf6, 0x07, 0x1e, 0x97, 0x7b, 0x68, 0xde, 0xa8, 0x09, 0x1c, 0xe9, 0x84, 0x8d, 0x89, 0x1e, - 0x77, 0x50, 0xf0, 0x29, 0x5a, 0x5a, 0x61, 0x01, 0x99, 0x6c, 0xe2, 0x7a, 0xd9, 0xe8, 0x03, 0xdc, - 0xee, 0xb3, 0x19, 0xe3, 0xac, 0xa8, 0xb7, 0x3e, 0xc8, 0x11, 0xe3, 0xbd, 0x99, 0x63, 0x73, 0xb6, - 0x88, 0xda, 0x93, 0x51, 0x9b, 0xf8, 0x5b, 0x85, 0xbd, 0x85, 0x20, 0xfa, 0xc0, 0x9b, 0xd0, 0xa9, - 0x3e, 0x73, 0xa2, 0x44, 0x6d, 0x6c, 0xbc, 0x61, 0x37, 0x41, 0x26, 0xf7, 0x27, 0x7d, 0x81, 0x76, - 0x0c, 0x0a, 0x12, 0x7d, 0x8e, 0xea, 0x5b, 0x77, 0xa3, 0x30, 0x8d, 0x0f, 0xa1, 0x27, 0xb8, 0xa5, - 0x95, 0x25, 0x89, 0x09, 0xc5, 0x72, 0xc2, 0xc7, 0x20, 0xfe, 0x80, 0x4a, 0x0a, 0x7f, 0x84, 0xdb, - 0x67, 0x3a, 0x9f, 0x5c, 0x69, 0xdc, 0x5a, 0x14, 0xc0, 0xfe, 0x17, 0x40, 0x92, 0xb8, 0xa0, 0xc5, - 0x17, 0xa8, 0x4d, 0xac, 0xf9, 0x5c, 0x37, 0xa7, 0xc1, 0x7b, 0x7e, 0x18, 0x17, 0xb3, 0x0c, 0x97, - 0x7b, 0x3e, 0x56, 0x0d, 0x83, 0xc2, 0x44, 0x62, 0x4e, 0x8f, 0x1b, 0xe5, 0x3d, 0x7e, 0x81, 0x5a, - 0xc0, 0x98, 0xfe, 0x3f, 0xd4, 0xb0, 0xa1, 0x29, 0xa3, 0xb6, 0x40, 0x80, 0xcd, 0xbe, 0x32, 0x50, - 0x46, 0x4a, 0x5b, 0xa4, 0x0f, 0xbd, 0xc7, 0xd0, 0xb7, 0x26, 0xce, 0x9c, 0x99, 0x7c, 0xd5, 0x93, - 0xe9, 0xe1, 0xc3, 0x14, 0x2a, 0xe8, 0xf3, 0x4b, 0x6c, 0x5e, 0x18, 0x6c, 0x36, 0xb5, 0x0b, 0xef, - 0x32, 0xc0, 0xd0, 0x11, 0x3a, 0xaf, 0xcd, 0x29, 0xfb, 0xbb, 0x24, 0x59, 0x82, 0x55, 0x5c, 0x83, - 0xf5, 0x2b, 0xec, 0x64, 0x58, 
0x83, 0xe2, 0x3a, 0xa8, 0x4e, 0x2c, 0xc7, 0xe4, 0x1e, 0x73, 0x55, - 0xf5, 0x17, 0xf4, 0x53, 0xec, 0xf8, 0x0f, 0xa0, 0xac, 0x65, 0x19, 0xbb, 0x59, 0x60, 0x21, 0xf1, - 0x00, 0x4d, 0x8d, 0xe9, 0x8b, 0xc9, 0x55, 0x48, 0xf8, 0x14, 0x2d, 0xdb, 0xdb, 0x38, 0x5f, 0xf8, - 0x3b, 0x85, 0x43, 0x6a, 0xda, 0xc9, 0x60, 0x7a, 0xec, 0x0a, 0x84, 0xbf, 0x11, 0x64, 0xfd, 0x01, - 0xcd, 0x88, 0xce, 0x76, 0x66, 0xc5, 0x6c, 0xdb, 0x21, 0x9b, 0x8b, 0xa4, 0xbf, 0x62, 0xf7, 0x88, - 0x71, 0x6f, 0x4a, 0xbe, 0x4e, 0x44, 0xa4, 0xdf, 0x63, 0xdb, 0x70, 0xb7, 0xcf, 0x27, 0xe5, 0xaa, - 0xd6, 0x30, 0x62, 0x02, 0x3a, 0xc4, 0x4e, 0x48, 0xe9, 0x2a, 0xaf, 0x1d, 0x31, 0x7e, 0x07, 0x1f, - 0x77, 0xee, 0x0a, 0x5d, 0xf1, 0xbb, 0x80, 0x11, 0x85, 0xd3, 0x57, 0xa8, 0x87, 0x73, 0x7e, 0xbf, - 0xf7, 0x70, 0xf0, 0x1f, 0x50, 0x7d, 0xe9, 0xfa, 0x26, 0xf9, 0x19, 0xcd, 0x94, 0x7f, 0x91, 0xdd, - 0xa5, 0x40, 0xc5, 0xb5, 0x47, 0xe9, 0x7e, 0x89, 0xe1, 0xd1, 0x0a, 0x19, 0xa0, 0x95, 0x36, 0xa2, - 0x95, 0x64, 0xdd, 0x32, 0xeb, 0xa2, 0x15, 0xf2, 0x13, 0x6a, 0x81, 0xbb, 0x90, 0xbd, 0x18, 0x9e, - 0xb6, 0x24, 0xe9, 0x4e, 0xce, 0x49, 0xc4, 0xf0, 0x1c, 0x35, 0x6d, 0x99, 0x21, 0xed, 0x29, 0xd2, - 0x8a, 0x12, 0x69, 0x85, 0xf4, 0x80, 0xd8, 0x27, 0xc8, 0xdd, 0x18, 0xb0, 0xe4, 0x1e, 0x05, 0x24, - 0x7d, 0x20, 0xf6, 0x91, 0x95, 0xf3, 0xb8, 0x97, 0x6a, 0x23, 0xe3, 0x3a, 0xb4, 0x42, 0x5e, 0x61, - 0xdb, 0x93, 0xc4, 0xf7, 0xe4, 0xf9, 0x5a, 0x20, 0xcf, 0x50, 0xd7, 0x4c, 0xfd, 0xda, 0xbe, 0xb2, - 0xf8, 0x4a, 0x96, 0xa2, 0x91, 0xd4, 0x43, 0xab, 0x22, 0xe9, 0xd1, 0x27, 0xad, 0x42, 0x92, 0xf2, - 0x8e, 0xa2, 0x66, 0x7e, 0x44, 0x5d, 0xcb, 0x21, 0xc9, 0x78, 0x58, 0x41, 0x15, 0x0a, 0x1a, 0x09, - 0x7f, 0x22, 0xf7, 0xb2, 0x37, 0xb3, 0x26, 0xcd, 0x31, 0x10, 0xfb, 0x4c, 0xf2, 0x7e, 0x97, 0x4c, - 0x2d, 0x39, 0xd7, 0x65, 0x6b, 0xf2, 0xe6, 0x3a, 0x40, 0x23, 0x21, 0xfe, 0x24, 0x7d, 0x11, 0x19, - 0x19, 0x95, 0x3e, 0x5e, 0x71, 0x1a, 0x8d, 0x68, 0x84, 0x66, 0x4a, 0xaf, 0xc9, 0x27, 0x71, 0x44, - 0x9e, 0x3d, 0x24, 0xff, 0x9d, 0xb9, 0x42, 0x4f, 0x2b, 0x4f, 0x04, 
0x72, 0x86, 0x56, 0x5a, 0xad, - 0xc9, 0xfd, 0xec, 0xe8, 0xb2, 0xbc, 0xdd, 0xd5, 0x80, 0x04, 0xf1, 0x73, 0x6c, 0xfa, 0x42, 0x4c, - 0x3e, 0x4a, 0xde, 0x67, 0x42, 0xab, 0xa5, 0xbd, 0xe5, 0x83, 0xa4, 0x6e, 0xa4, 0xa5, 0x77, 0x1d, - 0xdd, 0xc8, 0x17, 0x6b, 0x5a, 0x71, 0x15, 0x2d, 0xa5, 0xba, 0xeb, 0x28, 0x5a, 0xae, 0x4c, 0xd3, - 0xca, 0x4b, 0xfa, 0x47, 0xf7, 0xd2, 0xe0, 0x57, 0xce, 0x58, 0x9e, 0x58, 0xf3, 0xfd, 0xb9, 0x65, - 0x3b, 0x6f, 0x74, 0xff, 0x83, 0x23, 0xfa, 0x7a, 0x18, 0x6f, 0x7a, 0xbf, 0xbe, 0x79, 0x17, 0x00, - 0x00, 0xff, 0xff, 0xd0, 0x9c, 0x8b, 0x11, 0x92, 0x0c, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BlastClient is the client API for Blast service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type BlastClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) - GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) - SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) - WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Blast_WatchClusterClient, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) - GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) - SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Blast_WatchStoreClient, error) - GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) - IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_IndexDocumentClient, error) - DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_DeleteDocumentClient, error) - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) - GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) - GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) -} - -type blastClient struct { - cc *grpc.ClientConn -} - -func 
NewBlastClient(cc *grpc.ClientConn) BlastClient { - return &blastClient{cc} -} - -func (c *blastClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { - out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/ReadinessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { - out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetNode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/SetNode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteNode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { - out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetCluster", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Blast_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[0], "/protobuf.Blast/WatchCluster", opts...) - if err != nil { - return nil, err - } - x := &blastWatchClusterClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Blast_WatchClusterClient interface { - Recv() (*GetClusterResponse, error) - grpc.ClientStream -} - -type blastWatchClusterClient struct { - grpc.ClientStream -} - -func (x *blastWatchClusterClient) Recv() (*GetClusterResponse, error) { - m := new(GetClusterResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *blastClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) { - out := new(GetValueResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetValue", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/SetValue", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteValue", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Blast_WatchStoreClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[1], "/protobuf.Blast/WatchStore", opts...) - if err != nil { - return nil, err - } - x := &blastWatchStoreClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Blast_WatchStoreClient interface { - Recv() (*WatchStoreResponse, error) - grpc.ClientStream -} - -type blastWatchStoreClient struct { - grpc.ClientStream -} - -func (x *blastWatchStoreClient) Recv() (*WatchStoreResponse, error) { - m := new(WatchStoreResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *blastClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { - out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetDocument", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[2], "/protobuf.Blast/IndexDocument", opts...) 
- if err != nil { - return nil, err - } - x := &blastIndexDocumentClient{stream} - return x, nil -} - -type Blast_IndexDocumentClient interface { - Send(*IndexDocumentRequest) error - CloseAndRecv() (*IndexDocumentResponse, error) - grpc.ClientStream -} - -type blastIndexDocumentClient struct { - grpc.ClientStream -} - -func (x *blastIndexDocumentClient) Send(m *IndexDocumentRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *blastIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(IndexDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *blastClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[3], "/protobuf.Blast/DeleteDocument", opts...) - if err != nil { - return nil, err - } - x := &blastDeleteDocumentClient{stream} - return x, nil -} - -type Blast_DeleteDocumentClient interface { - Send(*DeleteDocumentRequest) error - CloseAndRecv() (*DeleteDocumentResponse, error) - grpc.ClientStream -} - -type blastDeleteDocumentClient struct { - grpc.ClientStream -} - -func (x *blastDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *blastDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(DeleteDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *blastClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/Search", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { - out := new(GetIndexConfigResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { - out := new(GetIndexStatsResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BlastServer is the server API for Blast service. -type BlastServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) - GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) - SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) - DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) - GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) - WatchCluster(*empty.Empty, Blast_WatchClusterServer) error - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) - GetValue(context.Context, *GetValueRequest) (*GetValueResponse, error) - SetValue(context.Context, *SetValueRequest) (*empty.Empty, error) - DeleteValue(context.Context, *DeleteValueRequest) (*empty.Empty, error) - WatchStore(*WatchStoreRequest, Blast_WatchStoreServer) error - GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - IndexDocument(Blast_IndexDocumentServer) error - DeleteDocument(Blast_DeleteDocumentServer) error - Search(context.Context, *SearchRequest) (*SearchResponse, error) - GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) - GetIndexStats(context.Context, *empty.Empty) 
(*GetIndexStatsResponse, error) -} - -func RegisterBlastServer(s *grpc.Server, srv BlastServer) { - s.RegisterService(&_Blast_serviceDesc, srv) -} - -func _Blast_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).LivenessProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/LivenessProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).LivenessProbe(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).ReadinessProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/ReadinessProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).ReadinessProbe(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetNode(ctx, req.(*GetNodeRequest)) - } - return interceptor(ctx, in, info, 
handler) -} - -func _Blast_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).SetNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/SetNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).SetNode(ctx, req.(*SetNodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).DeleteNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/DeleteNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetCluster(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetCluster", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetCluster(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); 
err != nil { - return err - } - return srv.(BlastServer).WatchCluster(m, &blastWatchClusterServer{stream}) -} - -type Blast_WatchClusterServer interface { - Send(*GetClusterResponse) error - grpc.ServerStream -} - -type blastWatchClusterServer struct { - grpc.ServerStream -} - -func (x *blastWatchClusterServer) Send(m *GetClusterResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Blast_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetValue(ctx, req.(*GetValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_SetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).SetValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/protobuf.Blast/SetValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).SetValue(ctx, req.(*SetValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_DeleteValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).DeleteValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/DeleteValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).DeleteValue(ctx, req.(*DeleteValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_WatchStore_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WatchStoreRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BlastServer).WatchStore(m, &blastWatchStoreServer{stream}) -} - -type Blast_WatchStoreServer interface { - Send(*WatchStoreResponse) error - grpc.ServerStream -} - -type blastWatchStoreServer struct { - grpc.ServerStream -} - -func (x *blastWatchStoreServer) Send(m *WatchStoreResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Blast_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocumentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetDocument(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetDocument", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetDocument(ctx, req.(*GetDocumentRequest)) - } - return interceptor(ctx, in, 
info, handler) -} - -func _Blast_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BlastServer).IndexDocument(&blastIndexDocumentServer{stream}) -} - -type Blast_IndexDocumentServer interface { - SendAndClose(*IndexDocumentResponse) error - Recv() (*IndexDocumentRequest, error) - grpc.ServerStream -} - -type blastIndexDocumentServer struct { - grpc.ServerStream -} - -func (x *blastIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *blastIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { - m := new(IndexDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Blast_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BlastServer).DeleteDocument(&blastDeleteDocumentServer{stream}) -} - -type Blast_DeleteDocumentServer interface { - SendAndClose(*DeleteDocumentResponse) error - Recv() (*DeleteDocumentRequest, error) - grpc.ServerStream -} - -type blastDeleteDocumentServer struct { - grpc.ServerStream -} - -func (x *blastDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *blastDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { - m := new(DeleteDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Blast_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).Search(ctx, 
req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetIndexConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetIndexConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetIndexConfig(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetIndexStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetIndexStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetIndexStats(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Blast_serviceDesc = grpc.ServiceDesc{ - ServiceName: "protobuf.Blast", - HandlerType: (*BlastServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LivenessProbe", - Handler: _Blast_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Blast_ReadinessProbe_Handler, - }, - { - MethodName: "GetNode", - Handler: _Blast_GetNode_Handler, - }, - { - MethodName: "SetNode", - Handler: _Blast_SetNode_Handler, - }, - { - MethodName: "DeleteNode", - Handler: _Blast_DeleteNode_Handler, - }, - { - MethodName: "GetCluster", - Handler: _Blast_GetCluster_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Blast_Snapshot_Handler, - }, - { 
- MethodName: "GetValue", - Handler: _Blast_GetValue_Handler, - }, - { - MethodName: "SetValue", - Handler: _Blast_SetValue_Handler, - }, - { - MethodName: "DeleteValue", - Handler: _Blast_DeleteValue_Handler, - }, - { - MethodName: "GetDocument", - Handler: _Blast_GetDocument_Handler, - }, - { - MethodName: "Search", - Handler: _Blast_Search_Handler, - }, - { - MethodName: "GetIndexConfig", - Handler: _Blast_GetIndexConfig_Handler, - }, - { - MethodName: "GetIndexStats", - Handler: _Blast_GetIndexStats_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "WatchCluster", - Handler: _Blast_WatchCluster_Handler, - ServerStreams: true, - }, - { - StreamName: "WatchStore", - Handler: _Blast_WatchStore_Handler, - ServerStreams: true, - }, - { - StreamName: "IndexDocument", - Handler: _Blast_IndexDocument_Handler, - ClientStreams: true, - }, - { - StreamName: "DeleteDocument", - Handler: _Blast_DeleteDocument_Handler, - ClientStreams: true, - }, - }, - Metadata: "protobuf/blast.proto", -} diff --git a/protobuf/blast.proto b/protobuf/blast.proto deleted file mode 100644 index 1980afa..0000000 --- a/protobuf/blast.proto +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; - -package protobuf; - -option go_package = "github.com/mosuka/blast/protobuf"; - -service Blast { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} - - rpc GetNode (GetNodeRequest) returns (GetNodeResponse) {} - rpc SetNode (SetNodeRequest) returns (google.protobuf.Empty) {} - rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} - rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} - rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} - rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} - - rpc GetValue (GetValueRequest) returns (GetValueResponse) {} - rpc SetValue (SetValueRequest) returns (google.protobuf.Empty) {} - rpc DeleteValue (DeleteValueRequest) returns (google.protobuf.Empty) {} - rpc WatchStore (WatchStoreRequest) returns (stream WatchStoreResponse) {} - - rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} - rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} - rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} - rpc Search (SearchRequest) returns (SearchResponse) {} - rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} - rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} -} - -// use for health check -message LivenessProbeResponse { - enum State { - UNKNOWN = 0; - ALIVE = 1; - DEAD = 2; - } - State state = 1; -} - -// use for health check -message ReadinessProbeResponse { - enum State { - UNKNOWN = 0; - READY = 1; - NOT_READY = 2; - } - State state = 1; -} - -// use for raft cluster status -message GetNodeRequest { - string id = 1; -} - -// use for raft cluster status -message GetNodeResponse { - google.protobuf.Any nodeConfig 
= 1; - string state = 2; -} - -// use for raft cluster status -message SetNodeRequest { - string id = 1; - google.protobuf.Any nodeConfig = 2; -} - -// use for raft cluster status -message DeleteNodeRequest { - string id = 1; -} - -// use for raft cluster status -message GetClusterResponse { - google.protobuf.Any cluster = 1; -} - -message GetValueRequest { - string key = 1; -} - -message GetValueResponse { - google.protobuf.Any value = 1; -} - -message SetValueRequest { - string key = 1; - google.protobuf.Any value = 2; -} - -message DeleteValueRequest { - string key = 1; -} - -message WatchStoreRequest { - string key = 1; -} - -message WatchStoreResponse { - enum Command { - UNKNOWN = 0; - SET = 1; - DELETE = 2; - } - Command command = 1; - string key = 2; - google.protobuf.Any value = 3; -} - -message GetDocumentRequest { - string id = 1; -} - -message GetDocumentResponse { - google.protobuf.Any fields = 1; -} - -message IndexDocumentRequest { - string id = 1; - google.protobuf.Any fields = 2; -} - -message IndexDocumentResponse { - int32 count = 1; -} - -message DeleteDocumentRequest { - string id = 1; -} - -message DeleteDocumentResponse { - int32 count = 1; -} - -message SearchRequest { - google.protobuf.Any search_request = 1; -} - -message SearchResponse { - google.protobuf.Any search_result = 1; -} - -message GetIndexConfigResponse { - google.protobuf.Any index_config = 1; -} - -message GetIndexStatsResponse { - google.protobuf.Any index_stats = 1; -} - -// use for creating snapshot -message Document { - string id = 1; - google.protobuf.Any fields = 2; -} diff --git a/protobuf/index.pb.go b/protobuf/index.pb.go new file mode 100644 index 0000000..b3197d3 --- /dev/null +++ b/protobuf/index.pb.go @@ -0,0 +1,2676 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.11.4 +// source: protobuf/index.proto + +package protobuf + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type Event_Type int32 + +const ( + Event_Unknown Event_Type = 0 + Event_Join Event_Type = 1 + Event_Leave Event_Type = 2 + Event_Set Event_Type = 3 + Event_Delete Event_Type = 4 + Event_BulkIndex Event_Type = 5 + Event_BulkDelete Event_Type = 6 +) + +// Enum value maps for Event_Type. 
+var ( + Event_Type_name = map[int32]string{ + 0: "Unknown", + 1: "Join", + 2: "Leave", + 3: "Set", + 4: "Delete", + 5: "BulkIndex", + 6: "BulkDelete", + } + Event_Type_value = map[string]int32{ + "Unknown": 0, + "Join": 1, + "Leave": 2, + "Set": 3, + "Delete": 4, + "BulkIndex": 5, + "BulkDelete": 6, + } +) + +func (x Event_Type) Enum() *Event_Type { + p := new(Event_Type) + *p = x + return p +} + +func (x Event_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Event_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protobuf_index_proto_enumTypes[0].Descriptor() +} + +func (Event_Type) Type() protoreflect.EnumType { + return &file_protobuf_index_proto_enumTypes[0] +} + +func (x Event_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Event_Type.Descriptor instead. +func (Event_Type) EnumDescriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{23, 0} +} + +type LivenessCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` +} + +func (x *LivenessCheckResponse) Reset() { + *x = LivenessCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LivenessCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LivenessCheckResponse) ProtoMessage() {} + +func (x *LivenessCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
LivenessCheckResponse.ProtoReflect.Descriptor instead. +func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{0} +} + +func (x *LivenessCheckResponse) GetAlive() bool { + if x != nil { + return x.Alive + } + return false +} + +type ReadinessCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` +} + +func (x *ReadinessCheckResponse) Reset() { + *x = ReadinessCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadinessCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadinessCheckResponse) ProtoMessage() {} + +func (x *ReadinessCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadinessCheckResponse.ProtoReflect.Descriptor instead. 
+func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{1} +} + +func (x *ReadinessCheckResponse) GetReady() bool { + if x != nil { + return x.Ready + } + return false +} + +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{2} +} + +func (x *Metadata) GetGrpcAddress() string { + if x != nil { + return x.GrpcAddress + } + return "" +} + +func (x *Metadata) GetHttpAddress() string { + if x != nil { + return x.HttpAddress + } + return "" +} + +type Node struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` +} + +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
+func (*Node) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{3} +} + +func (x *Node) GetRaftAddress() string { + if x != nil { + return x.RaftAddress + } + return "" +} + +func (x *Node) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Node) GetState() string { + if x != nil { + return x.State + } + return "" +} + +type Cluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *Cluster) Reset() { + *x = Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster) ProtoMessage() {} + +func (x *Cluster) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster.ProtoReflect.Descriptor instead. 
+func (*Cluster) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{4} +} + +func (x *Cluster) GetNodes() map[string]*Node { + if x != nil { + return x.Nodes + } + return nil +} + +func (x *Cluster) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +type JoinRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` +} + +func (x *JoinRequest) Reset() { + *x = JoinRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinRequest) ProtoMessage() {} + +func (x *JoinRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinRequest.ProtoReflect.Descriptor instead. 
+func (*JoinRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{5} +} + +func (x *JoinRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *JoinRequest) GetNode() *Node { + if x != nil { + return x.Node + } + return nil +} + +type LeaveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *LeaveRequest) Reset() { + *x = LeaveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaveRequest) ProtoMessage() {} + +func (x *LeaveRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaveRequest.ProtoReflect.Descriptor instead. 
+func (*LeaveRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{6} +} + +func (x *LeaveRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type NodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` +} + +func (x *NodeResponse) Reset() { + *x = NodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeResponse) ProtoMessage() {} + +func (x *NodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeResponse.ProtoReflect.Descriptor instead. 
+func (*NodeResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeResponse) GetNode() *Node { + if x != nil { + return x.Node + } + return nil +} + +type ClusterResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` +} + +func (x *ClusterResponse) Reset() { + *x = ClusterResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterResponse) ProtoMessage() {} + +func (x *ClusterResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterResponse.ProtoReflect.Descriptor instead. 
+func (*ClusterResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{8} +} + +func (x *ClusterResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster + } + return nil +} + +type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
+func (*Document) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{9} +} + +func (x *Document) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Document) GetFields() []byte { + if x != nil { + return x.Fields + } + return nil +} + +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{10} +} + +func (x *GetRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type GetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fields []byte `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. 
+func (*GetResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{11} +} + +func (x *GetResponse) GetFields() []byte { + if x != nil { + return x.Fields + } + return nil +} + +type SetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *SetRequest) Reset() { + *x = SetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRequest) ProtoMessage() {} + +func (x *SetRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRequest.ProtoReflect.Descriptor instead. 
+func (*SetRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{12} +} + +func (x *SetRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SetRequest) GetFields() []byte { + if x != nil { + return x.Fields + } + return nil +} + +type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{13} +} + +func (x *DeleteRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type BulkIndexRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Requests []*SetRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +} + +func (x *BulkIndexRequest) Reset() { + *x = BulkIndexRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkIndexRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkIndexRequest) ProtoMessage() {} + +func (x *BulkIndexRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkIndexRequest.ProtoReflect.Descriptor instead. 
+func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{14} +} + +func (x *BulkIndexRequest) GetRequests() []*SetRequest { + if x != nil { + return x.Requests + } + return nil +} + +type BulkIndexResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *BulkIndexResponse) Reset() { + *x = BulkIndexResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkIndexResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkIndexResponse) ProtoMessage() {} + +func (x *BulkIndexResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkIndexResponse.ProtoReflect.Descriptor instead. 
+func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{15} +} + +func (x *BulkIndexResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type BulkDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Requests []*DeleteRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +} + +func (x *BulkDeleteRequest) Reset() { + *x = BulkDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkDeleteRequest) ProtoMessage() {} + +func (x *BulkDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{16} +} + +func (x *BulkDeleteRequest) GetRequests() []*DeleteRequest { + if x != nil { + return x.Requests + } + return nil +} + +type BulkDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *BulkDeleteResponse) Reset() { + *x = BulkDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkDeleteResponse) ProtoMessage() {} + +func (x *BulkDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{17} +} + +func (x *BulkDeleteResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type SetMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *SetMetadataRequest) Reset() { + *x = SetMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetMetadataRequest) ProtoMessage() {} + +func (x *SetMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetMetadataRequest.ProtoReflect.Descriptor instead. 
+func (*SetMetadataRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{18} +} + +func (x *SetMetadataRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SetMetadataRequest) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type DeleteMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *DeleteMetadataRequest) Reset() { + *x = DeleteMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteMetadataRequest) ProtoMessage() {} + +func (x *DeleteMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteMetadataRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{19} +} + +func (x *DeleteMetadataRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type SearchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SearchRequest []byte `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` +} + +func (x *SearchRequest) Reset() { + *x = SearchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRequest) ProtoMessage() {} + +func (x *SearchRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead. 
+func (*SearchRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{20} +} + +func (x *SearchRequest) GetSearchRequest() []byte { + if x != nil { + return x.SearchRequest + } + return nil +} + +type SearchResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SearchResult []byte `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` +} + +func (x *SearchResponse) Reset() { + *x = SearchResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchResponse) ProtoMessage() {} + +func (x *SearchResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead. 
+func (*SearchResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{21} +} + +func (x *SearchResponse) GetSearchResult() []byte { + if x != nil { + return x.SearchResult + } + return nil +} + +type MappingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mapping []byte `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"` +} + +func (x *MappingResponse) Reset() { + *x = MappingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MappingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MappingResponse) ProtoMessage() {} + +func (x *MappingResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MappingResponse.ProtoReflect.Descriptor instead. 
+func (*MappingResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{22} +} + +func (x *MappingResponse) GetMapping() []byte { + if x != nil { + return x.Mapping + } + return nil +} + +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=index.Event_Type" json:"type,omitempty"` + Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. 
+func (*Event) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{23} +} + +func (x *Event) GetType() Event_Type { + if x != nil { + return x.Type + } + return Event_Unknown +} + +func (x *Event) GetData() *any.Any { + if x != nil { + return x.Data + } + return nil +} + +type WatchResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *WatchResponse) Reset() { + *x = WatchResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchResponse) ProtoMessage() {} + +func (x *WatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchResponse.ProtoReflect.Descriptor instead. 
+func (*WatchResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{24} +} + +func (x *WatchResponse) GetEvent() *Event { + if x != nil { + return x.Event + } + return nil +} + +type MetricsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *MetricsResponse) Reset() { + *x = MetricsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsResponse) ProtoMessage() {} + +func (x *MetricsResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
+func (*MetricsResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{25} +} + +func (x *MetricsResponse) GetMetrics() []byte { + if x != nil { + return x.Metrics + } + return nil +} + +var File_protobuf_index_proto protoreflect.FileDescriptor + +var file_protobuf_index_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x15, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x6c, 0x69, + 0x76, 0x65, 0x22, 0x2e, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x22, 0x50, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 
0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x22, 0x6c, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x61, 0x66, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x72, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2f, + 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x45, 0x0a, 0x0a, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, + 0x0a, 0x0b, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, + 0x04, 0x6e, 0x6f, 0x64, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x1e, + 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2f, + 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, + 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, + 0x3b, 0x0a, 0x0f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x32, 0x0a, 0x08, + 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x22, 0x1c, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 
0x64, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x1f, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x41, 0x0a, 0x10, + 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2d, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, + 0x29, 0x0a, 0x11, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x45, 0x0a, 0x11, 0x42, 0x75, + 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x30, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x22, 0x2a, 0x0a, 0x12, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x51, 0x0a, + 0x12, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x27, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x0d, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x22, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x25, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x5c, 0x0a, 0x04, 0x54, 0x79, 0x70, 
0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x10, 0x01, 0x12, + 0x09, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x65, + 0x74, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x04, 0x12, + 0x0d, 0x0a, 0x09, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x05, 0x12, 0x0e, + 0x0a, 0x0a, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x06, 0x22, 0x33, + 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x22, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x32, 0xb5, 0x0a, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x61, 0x0a, 0x0d, 0x4c, 0x69, + 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4c, 0x69, 0x76, 0x65, + 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, 0x31, 0x2f, 0x6c, + 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x64, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, + 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, + 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x45, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0a, + 0x12, 0x08, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x4a, 0x6f, + 0x69, 0x6e, 0x12, 0x12, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x1a, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x3a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x4e, + 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x16, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4e, + 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x13, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, + 0x4c, 0x65, 0x61, 
0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x2a, 0x10, 0x2f, 0x76, + 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x50, + 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x58, 0x0a, 0x09, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x17, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, + 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x1a, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x5b, 0x0a, 0x0a, 0x42, 0x75, + 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x2a, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x4b, 
0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x11, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x12, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, + 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, 0x64, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x52, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x11, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x1a, 0x15, + 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, + 0x64, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x55, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x2a, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, 0x64, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x4c, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x22, 0x0a, + 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x3a, 0x01, 0x2a, 0x12, 0x4e, 
0x0a, + 0x07, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x16, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, + 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x0a, + 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4e, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x22, 0x5a, 0x20, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x73, 0x75, 0x6b, 0x61, 0x2f, 0x62, 0x6c, + 0x61, 0x73, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_protobuf_index_proto_rawDescOnce sync.Once + file_protobuf_index_proto_rawDescData = file_protobuf_index_proto_rawDesc +) + +func file_protobuf_index_proto_rawDescGZIP() []byte { + file_protobuf_index_proto_rawDescOnce.Do(func() { + file_protobuf_index_proto_rawDescData = protoimpl.X.CompressGZIP(file_protobuf_index_proto_rawDescData) + }) + return 
file_protobuf_index_proto_rawDescData +} + +var file_protobuf_index_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_protobuf_index_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_protobuf_index_proto_goTypes = []interface{}{ + (Event_Type)(0), // 0: index.Event.Type + (*LivenessCheckResponse)(nil), // 1: index.LivenessCheckResponse + (*ReadinessCheckResponse)(nil), // 2: index.ReadinessCheckResponse + (*Metadata)(nil), // 3: index.Metadata + (*Node)(nil), // 4: index.Node + (*Cluster)(nil), // 5: index.Cluster + (*JoinRequest)(nil), // 6: index.JoinRequest + (*LeaveRequest)(nil), // 7: index.LeaveRequest + (*NodeResponse)(nil), // 8: index.NodeResponse + (*ClusterResponse)(nil), // 9: index.ClusterResponse + (*Document)(nil), // 10: index.Document + (*GetRequest)(nil), // 11: index.GetRequest + (*GetResponse)(nil), // 12: index.GetResponse + (*SetRequest)(nil), // 13: index.SetRequest + (*DeleteRequest)(nil), // 14: index.DeleteRequest + (*BulkIndexRequest)(nil), // 15: index.BulkIndexRequest + (*BulkIndexResponse)(nil), // 16: index.BulkIndexResponse + (*BulkDeleteRequest)(nil), // 17: index.BulkDeleteRequest + (*BulkDeleteResponse)(nil), // 18: index.BulkDeleteResponse + (*SetMetadataRequest)(nil), // 19: index.SetMetadataRequest + (*DeleteMetadataRequest)(nil), // 20: index.DeleteMetadataRequest + (*SearchRequest)(nil), // 21: index.SearchRequest + (*SearchResponse)(nil), // 22: index.SearchResponse + (*MappingResponse)(nil), // 23: index.MappingResponse + (*Event)(nil), // 24: index.Event + (*WatchResponse)(nil), // 25: index.WatchResponse + (*MetricsResponse)(nil), // 26: index.MetricsResponse + nil, // 27: index.Cluster.NodesEntry + (*any.Any)(nil), // 28: google.protobuf.Any + (*empty.Empty)(nil), // 29: google.protobuf.Empty +} +var file_protobuf_index_proto_depIdxs = []int32{ + 3, // 0: index.Node.metadata:type_name -> index.Metadata + 27, // 1: index.Cluster.nodes:type_name -> index.Cluster.NodesEntry + 4, // 2: 
index.JoinRequest.node:type_name -> index.Node + 4, // 3: index.NodeResponse.node:type_name -> index.Node + 5, // 4: index.ClusterResponse.cluster:type_name -> index.Cluster + 13, // 5: index.BulkIndexRequest.requests:type_name -> index.SetRequest + 14, // 6: index.BulkDeleteRequest.requests:type_name -> index.DeleteRequest + 3, // 7: index.SetMetadataRequest.metadata:type_name -> index.Metadata + 0, // 8: index.Event.type:type_name -> index.Event.Type + 28, // 9: index.Event.data:type_name -> google.protobuf.Any + 24, // 10: index.WatchResponse.event:type_name -> index.Event + 4, // 11: index.Cluster.NodesEntry.value:type_name -> index.Node + 29, // 12: index.Index.LivenessCheck:input_type -> google.protobuf.Empty + 29, // 13: index.Index.ReadinessCheck:input_type -> google.protobuf.Empty + 29, // 14: index.Index.Node:input_type -> google.protobuf.Empty + 6, // 15: index.Index.Join:input_type -> index.JoinRequest + 29, // 16: index.Index.Cluster:input_type -> google.protobuf.Empty + 7, // 17: index.Index.Leave:input_type -> index.LeaveRequest + 29, // 18: index.Index.Snapshot:input_type -> google.protobuf.Empty + 15, // 19: index.Index.BulkIndex:input_type -> index.BulkIndexRequest + 17, // 20: index.Index.BulkDelete:input_type -> index.BulkDeleteRequest + 11, // 21: index.Index.Get:input_type -> index.GetRequest + 13, // 22: index.Index.Set:input_type -> index.SetRequest + 14, // 23: index.Index.Delete:input_type -> index.DeleteRequest + 21, // 24: index.Index.Search:input_type -> index.SearchRequest + 29, // 25: index.Index.Mapping:input_type -> google.protobuf.Empty + 29, // 26: index.Index.Watch:input_type -> google.protobuf.Empty + 29, // 27: index.Index.Metrics:input_type -> google.protobuf.Empty + 1, // 28: index.Index.LivenessCheck:output_type -> index.LivenessCheckResponse + 2, // 29: index.Index.ReadinessCheck:output_type -> index.ReadinessCheckResponse + 8, // 30: index.Index.Node:output_type -> index.NodeResponse + 29, // 31: 
index.Index.Join:output_type -> google.protobuf.Empty + 9, // 32: index.Index.Cluster:output_type -> index.ClusterResponse + 29, // 33: index.Index.Leave:output_type -> google.protobuf.Empty + 29, // 34: index.Index.Snapshot:output_type -> google.protobuf.Empty + 16, // 35: index.Index.BulkIndex:output_type -> index.BulkIndexResponse + 18, // 36: index.Index.BulkDelete:output_type -> index.BulkDeleteResponse + 12, // 37: index.Index.Get:output_type -> index.GetResponse + 29, // 38: index.Index.Set:output_type -> google.protobuf.Empty + 29, // 39: index.Index.Delete:output_type -> google.protobuf.Empty + 22, // 40: index.Index.Search:output_type -> index.SearchResponse + 23, // 41: index.Index.Mapping:output_type -> index.MappingResponse + 25, // 42: index.Index.Watch:output_type -> index.WatchResponse + 26, // 43: index.Index.Metrics:output_type -> index.MetricsResponse + 28, // [28:44] is the sub-list for method output_type + 12, // [12:28] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_protobuf_index_proto_init() } +func file_protobuf_index_proto_init() { + if File_protobuf_index_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_protobuf_index_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LivenessCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadinessCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[10].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkIndexRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkIndexResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_protobuf_index_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MappingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protobuf_index_proto_rawDesc, + NumEnums: 1, + NumMessages: 27, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_protobuf_index_proto_goTypes, + DependencyIndexes: file_protobuf_index_proto_depIdxs, + EnumInfos: file_protobuf_index_proto_enumTypes, + MessageInfos: file_protobuf_index_proto_msgTypes, + }.Build() + File_protobuf_index_proto = out.File + file_protobuf_index_proto_rawDesc = nil + file_protobuf_index_proto_goTypes = nil + file_protobuf_index_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// IndexClient is the client API for Index service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type IndexClient interface { + LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) + ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) + Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) + Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) + Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) + Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) +} + +type indexClient struct { + cc grpc.ClientConnInterface +} + +func NewIndexClient(cc grpc.ClientConnInterface) IndexClient { + return &indexClient{cc} +} + +func (c *indexClient) LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) { + out := new(LivenessCheckResponse) + err := c.cc.Invoke(ctx, 
"/index.Index/LivenessCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) { + out := new(ReadinessCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/ReadinessCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) { + out := new(NodeResponse) + err := c.cc.Invoke(ctx, "/index.Index/Node", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Join", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) { + out := new(ClusterResponse) + err := c.cc.Invoke(ctx, "/index.Index/Cluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Leave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) { + out := new(MappingResponse) + err := c.cc.Invoke(ctx, "/index.Index/Mapping", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/Watch", opts...) + if err != nil { + return nil, err + } + x := &indexWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Index_WatchClient interface { + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type indexWatchClient struct { + grpc.ClientStream +} + +func (x *indexWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *indexClient) Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := c.cc.Invoke(ctx, "/index.Index/Metrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IndexServer is the server API for Index service. 
+type IndexServer interface { + LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error) + ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error) + Node(context.Context, *empty.Empty) (*NodeResponse, error) + Join(context.Context, *JoinRequest) (*empty.Empty, error) + Cluster(context.Context, *empty.Empty) (*ClusterResponse, error) + Leave(context.Context, *LeaveRequest) (*empty.Empty, error) + Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + Set(context.Context, *SetRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + Search(context.Context, *SearchRequest) (*SearchResponse, error) + Mapping(context.Context, *empty.Empty) (*MappingResponse, error) + Watch(*empty.Empty, Index_WatchServer) error + Metrics(context.Context, *empty.Empty) (*MetricsResponse, error) +} + +// UnimplementedIndexServer can be embedded to have forward compatible implementations. 
+type UnimplementedIndexServer struct { +} + +func (*UnimplementedIndexServer) LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented") +} +func (*UnimplementedIndexServer) ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadinessCheck not implemented") +} +func (*UnimplementedIndexServer) Node(context.Context, *empty.Empty) (*NodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Node not implemented") +} +func (*UnimplementedIndexServer) Join(context.Context, *JoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Join not implemented") +} +func (*UnimplementedIndexServer) Cluster(context.Context, *empty.Empty) (*ClusterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cluster not implemented") +} +func (*UnimplementedIndexServer) Leave(context.Context, *LeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") +} +func (*UnimplementedIndexServer) Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} +func (*UnimplementedIndexServer) BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedIndexServer) BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedIndexServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedIndexServer) Set(context.Context, 
*SetRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (*UnimplementedIndexServer) Delete(context.Context, *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedIndexServer) Search(context.Context, *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedIndexServer) Mapping(context.Context, *empty.Empty) (*MappingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mapping not implemented") +} +func (*UnimplementedIndexServer) Watch(*empty.Empty, Index_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (*UnimplementedIndexServer) Metrics(context.Context, *empty.Empty) (*MetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") +} + +func RegisterIndexServer(s *grpc.Server, srv IndexServer) { + s.RegisterService(&_Index_serviceDesc, srv) +} + +func _Index_LivenessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).LivenessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/LivenessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).LivenessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_ReadinessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(IndexServer).ReadinessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/ReadinessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).ReadinessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Node_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Node(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Node", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Node(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Cluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Cluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Cluster", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Cluster(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Snapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Snapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Mapping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Mapping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Mapping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Mapping(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IndexServer).Watch(m, &indexWatchServer{stream}) +} + +type Index_WatchServer interface { + Send(*WatchResponse) error + grpc.ServerStream +} + +type indexWatchServer struct { + grpc.ServerStream +} + +func (x *indexWatchServer) Send(m 
*WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Index_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Metrics(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Index_serviceDesc = grpc.ServiceDesc{ + ServiceName: "index.Index", + HandlerType: (*IndexServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LivenessCheck", + Handler: _Index_LivenessCheck_Handler, + }, + { + MethodName: "ReadinessCheck", + Handler: _Index_ReadinessCheck_Handler, + }, + { + MethodName: "Node", + Handler: _Index_Node_Handler, + }, + { + MethodName: "Join", + Handler: _Index_Join_Handler, + }, + { + MethodName: "Cluster", + Handler: _Index_Cluster_Handler, + }, + { + MethodName: "Leave", + Handler: _Index_Leave_Handler, + }, + { + MethodName: "Snapshot", + Handler: _Index_Snapshot_Handler, + }, + { + MethodName: "BulkIndex", + Handler: _Index_BulkIndex_Handler, + }, + { + MethodName: "BulkDelete", + Handler: _Index_BulkDelete_Handler, + }, + { + MethodName: "Get", + Handler: _Index_Get_Handler, + }, + { + MethodName: "Set", + Handler: _Index_Set_Handler, + }, + { + MethodName: "Delete", + Handler: _Index_Delete_Handler, + }, + { + MethodName: "Search", + Handler: _Index_Search_Handler, + }, + { + MethodName: "Mapping", + Handler: _Index_Mapping_Handler, + }, + { + MethodName: "Metrics", + Handler: _Index_Metrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Index_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: 
"protobuf/index.proto", +} diff --git a/protobuf/index.pb.gw.go b/protobuf/index.pb.gw.go new file mode 100644 index 0000000..810b9ed --- /dev/null +++ b/protobuf/index.pb.gw.go @@ -0,0 +1,1276 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/index.proto + +/* +Package protobuf is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package protobuf + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.LivenessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.LivenessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var 
protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ReadinessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.ReadinessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Node(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Node(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = 
pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Join(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Join(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Cluster(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Cluster_0(ctx context.Context, marshaler 
runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Cluster(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Leave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Leave(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + 
var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Snapshot(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkIndex(ctx, &protoReq) + return msg, metadata, err + +} 
+ +func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkDelete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Get(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Set(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server 
IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Delete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := 
server.Search(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Mapping(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Mapping(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Metrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Metrics(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterIndexHandlerServer registers the http handlers for service Index to "mux". +// UnaryRPC :call IndexServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+func RegisterIndexHandlerServer(ctx context.Context, mux *runtime.ServeMux, server IndexServer) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_LivenessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_ReadinessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Node_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Join_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Cluster_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Leave_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Snapshot_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkIndex_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkDelete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Get_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Set_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Delete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Search_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Mapping_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Metrics_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterIndexHandler(ctx, mux, conn) +} + +// RegisterIndexHandler registers the http handlers for service Index to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) +} + +// RegisterIndexHandlerClient registers the http handlers for service Index +// to "mux". 
The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "IndexClient" to call the correct interceptors. +func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_LivenessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_ReadinessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Node_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Join_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Cluster_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Leave_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Set_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Mapping_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Metrics_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Index_LivenessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "liveness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_ReadinessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "readiness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Node_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Join_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Cluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "cluster"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Leave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, 
[]string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Mapping_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "mapping"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Metrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Index_LivenessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_ReadinessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_Node_0 = runtime.ForwardResponseMessage + + forward_Index_Join_0 = runtime.ForwardResponseMessage + + forward_Index_Cluster_0 = runtime.ForwardResponseMessage + + forward_Index_Leave_0 = runtime.ForwardResponseMessage + + forward_Index_Snapshot_0 = runtime.ForwardResponseMessage + + forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Index_Get_0 = runtime.ForwardResponseMessage + + forward_Index_Set_0 = runtime.ForwardResponseMessage + + forward_Index_Delete_0 = runtime.ForwardResponseMessage + + forward_Index_Search_0 = runtime.ForwardResponseMessage + + forward_Index_Mapping_0 = runtime.ForwardResponseMessage + + forward_Index_Metrics_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/index.proto b/protobuf/index.proto new file mode 100644 index 0000000..9464a1d --- /dev/null +++ b/protobuf/index.proto @@ -0,0 +1,223 @@ +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; +//import "protoc-gen-swagger/options/annotations.proto"; + +package index; + +option go_package = "github.com/mosuka/blast/protobuf"; + +service Index { + rpc LivenessCheck (google.protobuf.Empty) returns (LivenessCheckResponse) 
{ + option (google.api.http) = { + get: "/v1/liveness_check" + }; + } + + rpc ReadinessCheck (google.protobuf.Empty) returns (ReadinessCheckResponse) { + option (google.api.http) = { + get: "/v1/readiness_check" + }; + } + + rpc Node (google.protobuf.Empty) returns (NodeResponse) { + option (google.api.http) = { + get: "/v1/node" + }; + } + rpc Join (JoinRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/cluster/{id}" + body: "node" + }; + } + rpc Cluster (google.protobuf.Empty) returns (ClusterResponse) { + option (google.api.http) = { + get: "/v1/cluster" + }; + } + rpc Leave (LeaveRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/cluster/{id}" + }; + } + + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/snapshot" + }; + } + + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/documents" + body: "*" + }; + } + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Set (SetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents/{id=**}" + body: "*" + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } + + rpc Mapping (google.protobuf.Empty) returns (MappingResponse) { + option (google.api.http) = { + get: "/v1/mapping" + }; + } + + rpc Watch (google.protobuf.Empty) returns (stream WatchResponse) {} + + rpc Metrics (google.protobuf.Empty) returns (MetricsResponse) { + option (google.api.http) = { + get: 
 "/v1/metrics" + }; + } +} + +message LivenessCheckResponse { + bool alive = 1; +} + +message ReadinessCheckResponse { + bool ready = 1; +} + +message Metadata { + string grpc_address = 1; + string http_address = 2; +} + +message Node { + string raft_address = 1; + Metadata metadata = 2; + string state = 3; +} + +message Cluster { + map<string, Node> nodes = 1; + string leader = 2; +} + +message JoinRequest { + string id = 1; + Node node = 2; +} + +message LeaveRequest { + string id = 1; +} + +message NodeResponse { + Node node = 1; +} + +message ClusterResponse { + Cluster cluster = 1; +} + +message Document { + string id = 1; + bytes fields = 2; +} + +message GetRequest { + string id = 1; +} + +message GetResponse { + bytes fields = 1; +} + +message SetRequest { + string id = 1; + bytes fields = 2; +} + +message DeleteRequest { + string id = 1; +} + +message BulkIndexRequest { + repeated SetRequest requests = 1; +} + +message BulkIndexResponse { + int32 count = 1; +} + +message BulkDeleteRequest { + repeated DeleteRequest requests = 1; +} + +message BulkDeleteResponse { + int32 count = 1; +} + +message SetMetadataRequest { + string id = 1; + Metadata metadata = 2; +} + +message DeleteMetadataRequest { + string id = 1; +} + +message SearchRequest { + bytes search_request = 1; +} + +message SearchResponse { + bytes search_result = 1; +} + +message MappingResponse { + bytes mapping = 1; +} + +message Event { + enum Type { + Unknown = 0; + Join = 1; + Leave = 2; + Set = 3; + Delete = 4; + BulkIndex = 5; + BulkDelete = 6; + } + Type type = 1; + google.protobuf.Any data = 2; +} + +message WatchResponse { + Event event = 1; +} + +message MetricsResponse { + bytes metrics = 1; +} diff --git a/protobuf/util.go b/protobuf/util.go deleted file mode 100644 index d3a6ca5..0000000 --- a/protobuf/util.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package protobuf - -import ( - "encoding/json" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/registry" -) - -func MarshalAny(message *any.Any) (interface{}, error) { - if message == nil { - return nil, nil - } - - typeUrl := message.TypeUrl - value := message.Value - - instance := registry.TypeInstanceByName(typeUrl) - - err := json.Unmarshal(value, instance) - if err != nil { - return nil, err - } - - return instance, nil -} - -func UnmarshalAny(instance interface{}, message *any.Any) error { - var err error - - if instance == nil { - return nil - } - - message.TypeUrl = registry.TypeNameByInstance(instance) - - message.Value, err = json.Marshal(instance) - if err != nil { - return err - } - - return nil -} diff --git a/protobuf/util_test.go b/protobuf/util_test.go deleted file mode 100644 index f8fb7e4..0000000 --- a/protobuf/util_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package protobuf - -import ( - "bytes" - "testing" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search/query" - "github.com/golang/protobuf/ptypes/any" -) - -func TestMarshalAny_Slice(t *testing.T) { - data := []interface{}{"a", 1} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Errorf("%v", err) - } - - expectedType := "[]interface {}" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`["a",1]`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestMarshalAny_Map(t *testing.T) { - data := map[string]interface{}{"a": 1, "b": 2, "c": 3} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Errorf("%v", err) - } - - expectedMapType := "map[string]interface {}" - actualMapType := dataAny.TypeUrl - if expectedMapType != actualMapType { - t.Errorf("expected content to see %s, saw %s", expectedMapType, actualMapType) - } - - expectedValue := []byte(`{"a":1,"b":2,"c":3}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -//func TestMarshalAny_Document(t *testing.T) { -// fieldsMap := map[string]interface{}{"f1": "aaa", "f2": 222, "f3": "ccc"} -// fieldsAny := &any.Any{} -// err := UnmarshalAny(fieldsMap, fieldsAny) -// if err != nil { -// t.Errorf("%v", err) -// } -// -// data := &index.Document{ -// Id: "1", -// Fields: fieldsAny, -// } -// -// dataAny := &any.Any{} -// err = UnmarshalAny(data, dataAny) -// if err != nil { -// t.Errorf("%v", err) -// } -// -// expectedType := "index.Document" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Errorf("expected content to see %s, saw %s", expectedType, 
actualType) -// } -// -// expectedValue := []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -//func TestMarshalAny_Node(t *testing.T) { -// data := &raft.Node{ -// Id: "node1", -// Metadata: &raft.Metadata{ -// GrpcAddr: ":5050", -// DataDir: "/tmp/blast/index1", -// BindAddr: ":6060", -// HttpAddr: ":8080", -// Leader: true, -// }, -// } -// -// dataAny := &any.Any{} -// err := UnmarshalAny(data, dataAny) -// if err != nil { -// t.Errorf("%v", err) -// } -// -// expectedType := "raft.Node" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Errorf("expected content to see %s, saw %s", expectedType, actualType) -// } -// -// expectedValue := []byte(`{"id":"node1","metadata":{"bind_addr":":6060","grpc_addr":":5050","http_addr":":8080","data_dir":"/tmp/blast/index1","leader":true}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -func TestMarshalAny_SearchRequest(t *testing.T) { - data := bleve.NewSearchRequest(bleve.NewQueryStringQuery("blast")) - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Errorf("%v", err) - } - - expectedType := "bleve.SearchRequest" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } 
-} - -func TestMarshalAny_SearchResult(t *testing.T) { - data := &bleve.SearchResult{ - Total: 10, - } - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Errorf("%v", err) - } - - expectedType := "bleve.SearchResult" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestUnmarshalAny_Slice(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "[]interface {}", - Value: []byte(`["a",1]`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Errorf("%v", err) - } - - data := *ins.(*[]interface{}) - - expected1 := "a" - actual1 := data[0] - if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(1) - actual2 := data[1] - if expected2 != actual2 { - t.Errorf("expected content to see %v, saw %v", expected2, actual2) - } -} - -func TestUnmarshalAny_Map(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "map[string]interface {}", - Value: []byte(`{"a":1,"b":2,"c":3}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Errorf("%v", err) - } - - data := *ins.(*map[string]interface{}) - - expected1 := float64(1) - actual1 := data["a"] - if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(2) - actual2 := data["b"] - if expected2 != actual2 { - t.Errorf("expected content to see %v, saw %v", expected2, actual2) - } - - expected3 := float64(3) - actual3 := data["c"] - if expected3 != actual3 { - t.Errorf("expected content to see %v, saw %v", expected3, actual3) - } -} - -//func TestUnmarshalAny_Document(t 
*testing.T) { -// dataAny := &any.Any{ -// TypeUrl: "index.Document", -// Value: []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`), -// } -// -// ins, err := MarshalAny(dataAny) -// if err != nil { -// t.Errorf("%v", err) -// } -// -// data := *ins.(*index.Document) -// -// expected1 := "1" -// actual1 := data.Id -// if expected1 != actual1 { -// t.Errorf("expected content to see %v, saw %v", expected1, actual1) -// } -// -// expected2 := "map[string]interface {}" -// actual2 := data.Fields.TypeUrl -// if expected2 != actual2 { -// t.Errorf("expected content to see %v, saw %v", expected2, actual2) -// } -// -// expected3 := []byte(`{"f1":"aaa","f2":222,"f3":"ccc"}`) -// actual3 := data.Fields.Value -// if !bytes.Equal(expected3, actual3) { -// t.Errorf("expected content to see %v, saw %v", expected3, actual3) -// } -//} - -func TestUnmarshalAny_SearchRequest(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchRequest", - Value: []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Errorf("%v", err) - } - - data := *ins.(*bleve.SearchRequest) - - expected1 := bleve.NewQueryStringQuery("blast").Query - actual1 := data.Query.(*query.QueryStringQuery).Query - if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) - } -} - -func TestUnmarshalAny_SearchResult(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchResult", - Value: []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Errorf("%v", err) - } - - data := *ins.(*bleve.SearchResult) - - expected1 := uint64(10) - actual1 := data.Total - if expected1 != actual1 { - t.Errorf("expected content to see 
%v, saw %v", expected1, actual1) - } -} diff --git a/registry/type.go b/registry/type.go index 5cb1206..7dc13b0 100644 --- a/registry/type.go +++ b/registry/type.go @@ -1,57 +1,11 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package registry import ( "errors" "fmt" "reflect" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" ) -func init() { - RegisterType("bool", reflect.TypeOf(false)) - RegisterType("string", reflect.TypeOf("")) - RegisterType("int", reflect.TypeOf(int(0))) - RegisterType("int8", reflect.TypeOf(int8(0))) - RegisterType("int16", reflect.TypeOf(int16(0))) - RegisterType("int32", reflect.TypeOf(int32(0))) - RegisterType("int64", reflect.TypeOf(int64(0))) - RegisterType("uint", reflect.TypeOf(uint(0))) - RegisterType("uint8", reflect.TypeOf(uint8(0))) - RegisterType("uint16", reflect.TypeOf(uint16(0))) - RegisterType("uint32", reflect.TypeOf(uint32(0))) - RegisterType("uint64", reflect.TypeOf(uint64(0))) - RegisterType("uintptr", reflect.TypeOf(uintptr(0))) - RegisterType("byte", reflect.TypeOf(byte(0))) - RegisterType("rune", reflect.TypeOf(rune(0))) - RegisterType("float32", reflect.TypeOf(float32(0))) - RegisterType("float64", reflect.TypeOf(float64(0))) - RegisterType("complex64", reflect.TypeOf(complex64(0))) - RegisterType("complex128", reflect.TypeOf(complex128(0))) - - RegisterType("map[string]interface {}", reflect.TypeOf((map[string]interface{})(nil))) - 
RegisterType("[]interface {}", reflect.TypeOf(([]interface{})(nil))) - - RegisterType("mapping.IndexMappingImpl", reflect.TypeOf(mapping.IndexMappingImpl{})) - RegisterType("bleve.SearchRequest", reflect.TypeOf(bleve.SearchRequest{})) - RegisterType("bleve.SearchResult", reflect.TypeOf(bleve.SearchResult{})) -} - type TypeRegistry map[string]reflect.Type var Types = make(TypeRegistry, 0) @@ -68,13 +22,11 @@ func TypeByName(name string) reflect.Type { } func TypeNameByInstance(instance interface{}) string { - switch instance.(type) { - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return reflect.TypeOf(instance).Name() - case map[string]interface{}, []interface{}: - return reflect.TypeOf(instance).String() + switch ins := instance.(type) { + case map[string]interface{}: + return reflect.TypeOf(ins).String() default: - return reflect.TypeOf(instance).Elem().String() + return reflect.TypeOf(ins).Elem().String() } } diff --git a/server/grpc_gateway.go b/server/grpc_gateway.go new file mode 100644 index 0000000..c63572c --- /dev/null +++ b/server/grpc_gateway.go @@ -0,0 +1,161 @@ +package server + +import ( + "context" + "math" + "net" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/gorilla/handlers" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +func responseFilter(ctx context.Context, w http.ResponseWriter, resp proto.Message) error { + switch resp.(type) { + case *protobuf.GetResponse: + w.Header().Set("Content-Type", "application/json") + case *protobuf.MetricsResponse: + w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8") + default: + w.Header().Set("Content-Type", marshaler.DefaultContentType) + } + + return nil +} + 
 +type GRPCGateway struct { + httpAddress string + grpcAddress string + + cancel context.CancelFunc + listener net.Listener + mux *runtime.ServeMux + + certificateFile string + keyFile string + + corsAllowedMethods []string + corsAllowedOrigins []string + corsAllowedHeaders []string + + logger *zap.Logger +} + +func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, corsAllowedMethods []string, corsAllowedOrigins []string, corsAllowedHeaders []string, logger *zap.Logger) (*GRPCGateway, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + baseCtx := context.TODO() + ctx, cancel := context.WithCancel(baseCtx) + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, new(marshaler.BlastMarshaler)), + runtime.WithForwardResponseOption(responseFilter), + ) + + if certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + err := protobuf.RegisterIndexHandlerFromEndpoint(ctx, mux, grpcAddress, dialOpts) + if err != nil { + logger.Error("failed to register Index handler from endpoint", zap.Error(err)) + return nil, err + } + + listener, err := net.Listen("tcp", httpAddress) + if err != nil { + logger.Error("failed to create listener", zap.Error(err)) + return nil, err + } + + return &GRPCGateway{ + httpAddress: httpAddress, + grpcAddress: grpcAddress, + listener: listener, + mux: mux, + cancel: cancel, + certificateFile: certificateFile, + keyFile: keyFile, + corsAllowedMethods: corsAllowedMethods, + 
 corsAllowedOrigins: corsAllowedOrigins, + corsAllowedHeaders: corsAllowedHeaders, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + corsOpts := make([]handlers.CORSOption, 0) + + if s.corsAllowedMethods != nil && len(s.corsAllowedMethods) > 0 { + corsOpts = append(corsOpts, handlers.AllowedMethods(s.corsAllowedMethods)) + } + if s.corsAllowedOrigins != nil && len(s.corsAllowedOrigins) > 0 { + corsOpts = append(corsOpts, handlers.AllowedOrigins(s.corsAllowedOrigins)) + } + if s.corsAllowedHeaders != nil && len(s.corsAllowedHeaders) > 0 { + corsOpts = append(corsOpts, handlers.AllowedHeaders(s.corsAllowedHeaders)) + } + + corsMux := handlers.CORS( + corsOpts..., + )(s.mux) + + if s.certificateFile == "" && s.keyFile == "" { + go func() { + if len(corsOpts) > 0 { + _ = http.Serve(s.listener, corsMux) + } else { + _ = http.Serve(s.listener, s.mux) + } + }() + } else { + go func() { + if len(corsOpts) > 0 { + _ = http.ServeTLS(s.listener, corsMux, s.certificateFile, s.keyFile) + } else { + _ = http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile) + } + }() + } + + s.logger.Info("gRPC gateway started", zap.String("http_address", s.httpAddress)) + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + s.logger.Error("failed to close listener", zap.String("http_address", s.listener.Addr().String()), zap.Error(err)) + } + + s.logger.Info("gRPC gateway stopped", zap.String("http_address", s.httpAddress)) + return nil +} diff --git a/server/grpc_gateway_test.go b/server/grpc_gateway_test.go new file mode 100644 index 0000000..72b36ee --- /dev/null +++ b/server/grpc_gateway_test.go @@ -0,0 +1,38 @@ +package server + +import ( + "fmt" + "testing" + "time" + + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/util" +) + +func Test_GRPCGateway_Start_Stop(t *testing.T) { + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + 
certificateFile := ""
+	// Renamed from KeyFile: Go locals use lowerCamel; an uppercase initial
+	// falsely suggests an exported identifier.
+	keyFile := ""
+	commonName := ""
+	corsAllowedMethods := make([]string, 0)
+	corsAllowedOrigins := make([]string, 0)
+	corsAllowedHeaders := make([]string, 0)
+	logger := log.NewLogger("WARN", "", 500, 3, 30, false)
+
+	grpcGateway, err := NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, corsAllowedMethods, corsAllowedOrigins, corsAllowedHeaders, logger)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	defer func() {
+		if err := grpcGateway.Stop(); err != nil {
+			t.Fatalf("%v", err)
+		}
+	}()
+
+	if err := grpcGateway.Start(); err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	time.Sleep(3 * time.Second)
+}
diff --git a/server/grpc_server.go b/server/grpc_server.go
new file mode 100644
index 0000000..2320ccb
--- /dev/null
+++ b/server/grpc_server.go
@@ -0,0 +1,133 @@
+package server
+
+import (
+	"math"
+	"net"
+	"time"
+
+	grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	grpczap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/mosuka/blast/metric"
+	"github.com/mosuka/blast/protobuf"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+)
+
+type GRPCServer struct {
+	grpcAddress string
+	service     *GRPCService
+	server      *grpc.Server
+	listener    net.Listener
+
+	certificateFile string
+	keyFile         string
+	commonName      string
+
+	logger *zap.Logger
+}
+
+func NewGRPCServer(grpcAddress string, raftServer *RaftServer, logger *zap.Logger) (*GRPCServer, error) {
+	return NewGRPCServerWithTLS(grpcAddress, raftServer, "", "", "", logger)
+}
+
+func NewGRPCServerWithTLS(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) {
+	grpcLogger := logger.Named("grpc")
+
+	opts := []grpc.ServerOption{
+		grpc.MaxRecvMsgSize(math.MaxInt64),
+		grpc.MaxSendMsgSize(math.MaxInt64),
+
grpc.StreamInterceptor(
+			grpcmiddleware.ChainStreamServer(
+				metric.GrpcMetrics.StreamServerInterceptor(),
+				grpczap.StreamServerInterceptor(grpcLogger),
+			),
+		),
+		grpc.UnaryInterceptor(
+			grpcmiddleware.ChainUnaryServer(
+				metric.GrpcMetrics.UnaryServerInterceptor(),
+				grpczap.UnaryServerInterceptor(grpcLogger),
+			),
+		),
+		grpc.KeepaliveParams(
+			keepalive.ServerParameters{
+				//MaxConnectionIdle: 0,
+				//MaxConnectionAge: 0,
+				//MaxConnectionAgeGrace: 0,
+				Time:    5 * time.Second,
+				Timeout: 5 * time.Second,
+			},
+		),
+	}
+
+	if certificateFile == "" && keyFile == "" {
+		logger.Info("disabling TLS")
+	} else {
+		logger.Info("enabling TLS")
+		creds, err := credentials.NewServerTLSFromFile(certificateFile, keyFile)
+		if err != nil {
+			logger.Error("failed to create credentials", zap.Error(err))
+			// BUG FIX: previously execution fell through and appended nil
+			// credentials, yielding a server that silently runs without the
+			// TLS the caller asked for. Fail fast instead.
+			return nil, err
+		}
+		opts = append(opts, grpc.Creds(creds))
+	}
+
+	server := grpc.NewServer(
+		opts...,
+	)
+
+	service, err := NewGRPCService(raftServer, certificateFile, commonName, logger)
+	if err != nil {
+		logger.Error("failed to create key value store service", zap.Error(err))
+		return nil, err
+	}
+
+	protobuf.RegisterIndexServer(server, service)
+
+	// Initialize all metrics.
+ metric.GrpcMetrics.InitializeMetrics(server) + grpc_prometheus.Register(server) + + listener, err := net.Listen("tcp", grpcAddress) + if err != nil { + logger.Error("failed to create listener", zap.String("grpc_address", grpcAddress), zap.Error(err)) + return nil, err + } + + return &GRPCServer{ + grpcAddress: grpcAddress, + service: service, + server: server, + listener: listener, + certificateFile: certificateFile, + keyFile: keyFile, + commonName: commonName, + logger: logger, + }, nil +} + +func (s *GRPCServer) Start() error { + if err := s.service.Start(); err != nil { + s.logger.Error("failed to start service", zap.Error(err)) + } + + go func() { + _ = s.server.Serve(s.listener) + }() + + s.logger.Info("gRPC server started", zap.String("grpc_address", s.grpcAddress)) + return nil +} + +func (s *GRPCServer) Stop() error { + if err := s.service.Stop(); err != nil { + s.logger.Error("failed to stop service", zap.Error(err)) + } + + //s.server.GracefulStop() + s.server.Stop() + + s.logger.Info("gRPC server stopped", zap.String("grpc_address", s.grpcAddress)) + return nil +} diff --git a/server/grpc_server_test.go b/server/grpc_server_test.go new file mode 100644 index 0000000..82e4220 --- /dev/null +++ b/server/grpc_server_test.go @@ -0,0 +1,71 @@ +package server + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func Test_GRPCServer_Start_Stop(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, 
"../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + // Raft server + raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC server + grpcServer, err := NewGRPCServer(grpcAddress, raftServer, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) +} diff --git a/server/grpc_service.go b/server/grpc_service.go new file mode 100644 index 0000000..4d6f0ec --- /dev/null +++ b/server/grpc_service.go @@ -0,0 +1,540 @@ +package server + +import ( + "bytes" + "context" + "encoding/json" + "sync" + "time" + + "github.com/blevesearch/bleve/v2" + "github.com/golang/protobuf/ptypes/empty" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "github.com/prometheus/common/expfmt" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type GRPCService struct { + raftServer *RaftServer + certificateFile string + commonName string + logger *zap.Logger + + watchMutex sync.RWMutex + watchChans map[chan protobuf.WatchResponse]struct{} + + peerClients map[string]*client.GRPCClient + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} +} + +func NewGRPCService(raftServer *RaftServer, certificateFile string, commonName string, logger *zap.Logger) (*GRPCService, error) { + return &GRPCService{ + raftServer: raftServer, + certificateFile: certificateFile, + commonName: commonName, + logger: logger, + + watchChans: make(map[chan 
protobuf.WatchResponse]struct{}), + + peerClients: make(map[string]*client.GRPCClient, 0), + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + }, nil +} + +func (s *GRPCService) Start() error { + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("gRPC service started") + return nil +} + +func (s *GRPCService) Stop() error { + s.stopWatchCluster() + + s.logger.Info("gRPC service stopped") + return nil +} + +func (s *GRPCService) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + if err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case event := <-s.raftServer.applyCh: + watchResp := &protobuf.WatchResponse{ + Event: event, + } + for c := range s.watchChans { + c <- *watchResp + } + case <-ticker.C: + s.watchMutex.Lock() + + // open clients for peer nodes + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Warn("failed to get cluster info", zap.String("err", err.Error())) + } + for id, node := range nodes { + if id == s.raftServer.id { + continue + } + + if node.Metadata == nil || node.Metadata.GrpcAddress == "" { + s.logger.Debug("gRPC address missing", zap.String("id", id)) + continue + } + if c, ok := s.peerClients[id]; ok { + if c.Target() != node.Metadata.GrpcAddress { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + 
s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err))
+						}
+						s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress))
+						if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil {
+							s.peerClients[id] = newClient
+						} else {
+							// BUG FIX: log the address we failed to dial; c is the
+							// just-closed old client, so c.Target() reports the
+							// stale address.
+							s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress), zap.Error(err))
+						}
+					}
+				} else {
+					s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress))
+					if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil {
+						s.peerClients[id] = newClient
+					} else {
+						// BUG FIX: in this branch c is the map lookup's zero value
+						// (a nil client), so c.Target() would dereference nil; use
+						// the node's advertised gRPC address instead.
+						s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress), zap.Error(err))
+					}
+				}
+			}
+
+			// close clients for non-existent peer nodes
+			for id, c := range s.peerClients {
+				if _, exist := nodes[id]; !exist {
+					s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target()))
+					delete(s.peerClients, id)
+					if err := c.Close(); err != nil {
+						s.logger.Warn("failed to close old client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err))
+					}
+				}
+			}
+
+			s.watchMutex.Unlock()
+		}
+	}
+}
+
+func (s *GRPCService) stopWatchCluster() {
+	if s.watchClusterStopCh != nil {
+		s.logger.Info("send a request to stop updating a cluster")
+		close(s.watchClusterStopCh)
+	}
+
+	s.logger.Info("wait for the cluster watching to stop")
+	<-s.watchClusterDoneCh
+	s.logger.Info("the cluster watching has been stopped")
+
+	s.logger.Info("close all peer clients")
+	for id, c := range s.peerClients {
+		s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target()))
+		delete(s.peerClients, id)
+		if err := c.Close();
err != nil { + s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } +} + +func (s *GRPCService) LivenessCheck(ctx context.Context, req *empty.Empty) (*protobuf.LivenessCheckResponse, error) { + resp := &protobuf.LivenessCheckResponse{} + + resp.Alive = true + + return resp, nil +} + +func (s *GRPCService) ReadinessCheck(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessCheckResponse, error) { + resp := &protobuf.ReadinessCheckResponse{} + + timeout := 10 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + s.logger.Error("missing leader node", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + if s.raftServer.State() == raft.Candidate || s.raftServer.State() == raft.Shutdown { + err := errors.ErrNodeNotReady + s.logger.Error(err.Error(), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Ready = true + + return resp, nil +} + +func (s *GRPCService) Join(ctx context.Context, req *protobuf.JoinRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Join(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Join(req.Id, req.Node) + if err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + s.logger.Debug("node already exists", zap.Any("req", req), zap.Error(err)) + default: + s.logger.Error("failed to join node to the cluster", zap.String("id", req.Id), zap.Error(err)) + return resp, 
status.Error(codes.Internal, err.Error()) + } + } + + return resp, nil +} + +func (s *GRPCService) Leave(ctx context.Context, req *protobuf.LeaveRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Leave(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Leave(req.Id) + if err != nil { + s.logger.Error("failed to leave node from the cluster", zap.Any("req", req), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Node(ctx context.Context, req *empty.Empty) (*protobuf.NodeResponse, error) { + resp := &protobuf.NodeResponse{} + + node, err := s.raftServer.Node() + if err != nil { + s.logger.Error("failed to get node info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Node = node + + return resp, nil +} + +func (s *GRPCService) Cluster(ctx context.Context, req *empty.Empty) (*protobuf.ClusterResponse, error) { + resp := &protobuf.ClusterResponse{} + + cluster := &protobuf.Cluster{} + + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + for id, node := range nodes { + if id == s.raftServer.id { + node.State = s.raftServer.StateStr() + } else { + c := s.peerClients[id] + nodeResp, err := c.Node() + if err != nil { + node.State = raft.Shutdown.String() + s.logger.Error("failed to get node info", 
zap.String("grpc_address", node.Metadata.GrpcAddress), zap.String("err", err.Error())) + } else { + node.State = nodeResp.Node.State + } + } + } + cluster.Nodes = nodes + + serverID, err := s.raftServer.LeaderID(60 * time.Second) + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + cluster.Leader = string(serverID) + + resp.Cluster = cluster + + return resp, nil +} + +func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + resp := &empty.Empty{} + + err := s.raftServer.Snapshot() + if err != nil { + s.logger.Error("failed to snapshot data", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Get(ctx context.Context, req *protobuf.GetRequest) (*protobuf.GetResponse, error) { + resp := &protobuf.GetResponse{} + + fields, err := s.raftServer.Get(req.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + s.logger.Debug("document not found", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.NotFound, err.Error()) + default: + s.logger.Error("failed to get document", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + s.logger.Error("failed to marshal fields map to bytes", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Fields = fieldsBytes + + return resp, nil +} + +func (s *GRPCService) Set(ctx context.Context, req *protobuf.SetRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, 
status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Set(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Set(req); err != nil { + s.logger.Error("failed to index document", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Delete(ctx context.Context, req *protobuf.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Delete(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Delete(req); err != nil { + s.logger.Error("failed to delete document", zap.String("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkIndex(ctx context.Context, req *protobuf.BulkIndexRequest) (*protobuf.BulkIndexResponse, error) { + resp := &protobuf.BulkIndexResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkIndex(req) + } + + if err := s.raftServer.BulkIndex(req); err != nil { + 
s.logger.Error("failed to index documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkDelete(ctx context.Context, req *protobuf.BulkDeleteRequest) (*protobuf.BulkDeleteResponse, error) { + resp := &protobuf.BulkDeleteResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkDelete(req) + } + + if err := s.raftServer.BulkDelete(req); err != nil { + s.logger.Error("failed to delete documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { + resp := &protobuf.SearchResponse{} + + searchRequest := &bleve.SearchRequest{} + if err := json.Unmarshal(req.SearchRequest, searchRequest); err != nil { + s.logger.Error("failed to unmarshal bytes to search request", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResult, err := s.raftServer.Search(searchRequest) + if err != nil { + s.logger.Error("failed to search documents", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResultBytes, err := json.Marshal(searchResult) + if err != nil { + s.logger.Error("failed to marshal search result to bytes", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.SearchResult = searchResultBytes + + return resp, nil +} + +func (s *GRPCService) Mapping(ctx context.Context, req *empty.Empty) (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + var err error + + resp, err = s.raftServer.Mapping() + if err != nil { + 
s.logger.Error("failed to get document", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Watch(req *empty.Empty, server protobuf.Index_WatchServer) error { + chans := make(chan protobuf.WatchResponse) + + s.watchMutex.Lock() + s.watchChans[chans] = struct{}{} + s.watchMutex.Unlock() + + defer func() { + s.watchMutex.Lock() + delete(s.watchChans, chans) + s.watchMutex.Unlock() + close(chans) + }() + + for resp := range chans { + if err := server.Send(&resp); err != nil { + s.logger.Error("failed to send watch data", zap.String("event", resp.Event.String()), zap.Error(err)) + return status.Error(codes.Internal, err.Error()) + } + } + + return nil +} + +func (s *GRPCService) Metrics(ctx context.Context, req *empty.Empty) (*protobuf.MetricsResponse, error) { + resp := &protobuf.MetricsResponse{} + + var err error + + gather, err := metric.Registry.Gather() + if err != nil { + s.logger.Error("failed to get gather", zap.Error(err)) + } + out := &bytes.Buffer{} + for _, mf := range gather { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + s.logger.Error("failed to parse metric family", zap.Error(err)) + } + } + + resp.Metrics = out.Bytes() + + return resp, nil +} diff --git a/server/grpc_service_test.go b/server/grpc_service_test.go new file mode 100644 index 0000000..753128b --- /dev/null +++ b/server/grpc_service_test.go @@ -0,0 +1,1072 @@ +package server + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func Test_GRPCService_Start_Stop(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := 
util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) +} + +//func Test_GRPCService_LivenessCheck(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// // Raft server +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err 
:= NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &empty.Empty{} +// +// resp, err := grpcService.LivenessCheck(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if !resp.Alive { +// t.Fatalf("expected content to see %v, saw %v", true, resp.Alive) +// } +//} + +//func Test_GRPCService_ReadinessCheck(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// raftAddress := 
fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// // Raft server +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer 
func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &empty.Empty{} +// +// resp, err := grpcService.ReadinessCheck(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if !resp.Ready { +// t.Fatalf("expected content to see %v, saw %v", true, resp.Ready) +// } +//} + +//func Test_GRPCService_Join(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// certificateFile := "" +// commonName := "" +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// 
grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress, +// HttpAddress: httpAddress, +// }, +// }, +// } +// +// _, err = grpcService.Join(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +//} + +//func Test_GRPCService_Node(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, 
"../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := 
&protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress, +// HttpAddress: httpAddress, +// }, +// }, +// } +// +// _, err = grpcService.Join(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// resp, err := grpcService.Node(ctx, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if raftAddress != resp.Node.RaftAddress { +// t.Fatalf("expected content to see %v, saw %v", raftAddress, resp.Node.RaftAddress) +// } +// +// if grpcAddress != resp.Node.Metadata.GrpcAddress { +// t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.GrpcAddress) +// } +// +// if httpAddress != resp.Node.Metadata.HttpAddress { +// t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.HttpAddress) +// } +// +// if raft.Leader.String() != resp.Node.State { +// t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) +// } +//} + +//func Test_GRPCService_Leave(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("DEBUG", "", 500, 3, 30, false) +// +// certificateFile := "" +// commonName := "" +// +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), 
+// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// +// ctx := context.Background() +// +// // Node1 +// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// dir1 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir1) +// }() +// +// // Raft server +// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// grpcService1, err := NewGRPCService(raftServer1, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC server +// grpcServer1 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer1, grpcService1) +// listener1, err := net.Listen("tcp", grpcAddress1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer1.Stop() +// }() +// go func() { +// if err := grpcServer1.Serve(listener1); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.WaitForDetectLeader(60 * time.Second); err != nil { +// t.Fatalf("%v", err) +// } +// time.Sleep(10 * time.Second) +// +// req1 := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress1, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress1, +// HttpAddress: httpAddress1, +// }, +// }, +// } +// _, 
err = grpcService1.Join(ctx, req1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Node2 +// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// dir2 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir2) +// }() +// +// // Raft server +// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping, false, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// grpcService2, err := NewGRPCService(raftServer2, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC server +// grpcServer2 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer2, grpcService2) +// listener2, err := net.Listen("tcp", grpcAddress2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer2.Stop() +// }() +// go func() { +// if err := grpcServer2.Serve(listener2); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// time.Sleep(10 * time.Second) +// +// req2 := &protobuf.JoinRequest{ +// Id: "node2", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress2, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress2, +// HttpAddress: httpAddress2, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Node3 +// raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// dir3 := 
util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir3) +// }() +// +// // Raft server +// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping, false, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// grpcService3, err := NewGRPCService(raftServer3, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC server +// grpcServer3 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer3, grpcService3) +// listener3, err := net.Listen("tcp", grpcAddress3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer3.Stop() +// }() +// go func() { +// if err := grpcServer3.Serve(listener3); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// time.Sleep(10 * time.Second) +// +// req3 := &protobuf.JoinRequest{ +// Id: "node3", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress3, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress3, +// HttpAddress: httpAddress3, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// resp, err := grpcService1.Cluster(ctx, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if "node1" != resp.Cluster.Leader { +// t.Fatalf("expected content to see %v, saw %v", "node1", resp.Cluster.Leader) +// } +// +// //if raftAddress1 != resp..RaftAddress { +// // t.Fatalf("expected content to see %v, saw %v", raftAddress1, resp.Node.RaftAddress) +// //} +// // +// //if grpcAddress1 != resp.Node.Metadata.GrpcAddress { +// // 
t.Fatalf("expected content to see %v, saw %v", grpcAddress1, resp.Node.Metadata.GrpcAddress) +// //} +// // +// //if httpAddress1 != resp.Node.Metadata.HttpAddress { +// // t.Fatalf("expected content to see %v, saw %v", grpcAddress1, resp.Node.Metadata.HttpAddress) +// //} +// // +// //if raft.Leader.String() != resp.Node.State { +// // t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) +// //} +//} + +//func Test_GRPCService_Cluster(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// // Raft server +// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// dir1 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir1) +// }() +// indexMapping1, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger1 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping1, true, logger1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile1 := "" +// commonName1 := "" +// grpcService1, err := NewGRPCService(raftServer1, certificateFile1, commonName1, logger1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx1 := context.Background() +// joinReq1 := 
&protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress1, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress1, +// HttpAddress: httpAddress1, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx1, joinReq1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// dir2 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir2) +// }() +// indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping2, false, logger2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile2 := "" +// commonName2 := "" +// grpcService2, err := NewGRPCService(raftServer2, certificateFile2, commonName2, logger2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx2 := context.Background() +// joinReq2 := &protobuf.JoinRequest{ +// Id: "node2", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress2, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress2, +// HttpAddress: httpAddress2, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx2, joinReq2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftAddress3 := 
fmt.Sprintf(":%d", util.TmpPort()) +// dir3 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir3) +// }() +// indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping3, false, logger3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile3 := "" +// commonName3 := "" +// grpcService3, err := NewGRPCService(raftServer3, certificateFile3, commonName3, logger3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx3 := context.Background() +// joinReq3 := &protobuf.JoinRequest{ +// Id: "node3", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress3, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress3, +// HttpAddress: httpAddress3, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx3, joinReq3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// respCluster1, err := grpcService1.Cluster(ctx1, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != len(respCluster1.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster1.Cluster.Nodes)) +// } +// +// respCluster2, err := grpcService2.Cluster(ctx2, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != 
len(respCluster2.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster2.Cluster.Nodes)) +// } +// +// respCluster3, err := grpcService2.Cluster(ctx3, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != len(respCluster3.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster3.Cluster.Nodes)) +// } +//} diff --git a/server/raft_fsm.go b/server/raft_fsm.go new file mode 100644 index 0000000..d03dcfa --- /dev/null +++ b/server/raft_fsm.go @@ -0,0 +1,400 @@ +package server + +import ( + "encoding/json" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/mapping" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/storage" + "go.uber.org/zap" +) + +type ApplyResponse struct { + count int + error error +} + +type RaftFSM struct { + logger *zap.Logger + + index *storage.Index + metadata map[string]*protobuf.Metadata + nodesMutex sync.RWMutex + + applyCh chan *protobuf.Event +} + +func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*RaftFSM, error) { + index, err := storage.NewIndex(path, indexMapping, logger) + if err != nil { + logger.Error("failed to create index store", zap.String("path", path), zap.Error(err)) + return nil, err + } + + return &RaftFSM{ + logger: logger, + index: index, + metadata: make(map[string]*protobuf.Metadata, 0), + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (f *RaftFSM) Close() error { + f.applyCh <- nil + f.logger.Info("apply channel has closed") + + if err := f.index.Close(); err != nil { + f.logger.Error("failed to close index store", zap.Error(err)) + return err + } + + f.logger.Info("Index has closed") + + return nil +} + +func (f *RaftFSM) get(id string) (map[string]interface{}, error) 
{ + return f.index.Get(id) +} + +func (f *RaftFSM) search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + return f.index.Search(searchRequest) +} + +func (f *RaftFSM) set(id string, fields map[string]interface{}) error { + return f.index.Index(id, fields) +} + +func (f *RaftFSM) delete(id string) error { + return f.index.Delete(id) +} + +func (f *RaftFSM) bulkIndex(docs []map[string]interface{}) (int, error) { + return f.index.BulkIndex(docs) +} + +func (f *RaftFSM) bulkDelete(ids []string) (int, error) { + return f.index.BulkDelete(ids) +} + +func (f *RaftFSM) getMetadata(id string) *protobuf.Metadata { + if metadata, exists := f.metadata[id]; exists { + return metadata + } else { + f.logger.Debug("metadata not found", zap.String("id", id)) + return nil + } +} + +func (f *RaftFSM) setMetadata(id string, metadata *protobuf.Metadata) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + f.metadata[id] = metadata + + return nil +} + +func (f *RaftFSM) deleteMetadata(id string) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + if _, exists := f.metadata[id]; exists { + delete(f.metadata, id) + } + + return nil +} + +func (f *RaftFSM) Apply(l *raft.Log) interface{} { + var event protobuf.Event + err := proto.Unmarshal(l.Data, &event) + if err != nil { + f.logger.Error("failed to unmarshal message bytes to KVS command", zap.Error(err)) + return err + } + + switch event.Type { + case protobuf.Event_Join: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := data.(*protobuf.SetMetadataRequest) + + if err := f.setMetadata(req.Id, req.Metadata); err != nil { + return 
&ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Leave: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteMetadataRequest) + + if err := f.deleteMetadata(req.Id); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Set: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.SetRequest) + + var fields map[string]interface{} + if err := json.Unmarshal(req.Fields, &fields); err != nil { + return &ApplyResponse{error: err} + } + + if err := f.set(req.Id, fields); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Delete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to delete request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteRequest) + + if err := f.delete(req.Id); err != nil { + return 
&ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_BulkIndex: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: err} + } + req := *data.(*protobuf.BulkIndexRequest) + + docs := make([]map[string]interface{}, 0) + for _, r := range req.Requests { + var fields map[string]interface{} + if err := json.Unmarshal(r.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal bytes to map", zap.String("id", r.Id), zap.Error(err)) + continue + } + + doc := map[string]interface{}{ + "id": r.Id, + "fields": fields, + } + docs = append(docs, doc) + } + + count, err := f.bulkIndex(docs) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + case protobuf.Event_BulkDelete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: err} + } + req := *data.(*protobuf.BulkDeleteRequest) + + ids := make([]string, 0) + for _, r := range req.Requests { + ids = append(ids, r.Id) + } + + count, err := f.bulkDelete(ids) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + default: + err = errors.ErrUnsupportedEvent + 
f.logger.Error("unsupported command", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } +} + +func (f *RaftFSM) Stats() map[string]interface{} { + return f.index.Stats() +} + +func (f *RaftFSM) Mapping() *mapping.IndexMappingImpl { + return f.index.Mapping() +} + +func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { + return &KVSFSMSnapshot{ + index: f.index, + logger: f.logger, + }, nil +} + +func (f *RaftFSM) Restore(rc io.ReadCloser) error { + start := time.Now() + + f.logger.Info("start to restore items") + + defer func() { + err := rc.Close() + if err != nil { + f.logger.Error("failed to close reader", zap.Error(err)) + } + }() + + data, err := ioutil.ReadAll(rc) + if err != nil { + f.logger.Error("failed to open reader", zap.Error(err)) + return err + } + + count := uint64(0) + + buff := proto.NewBuffer(data) + for { + doc := &protobuf.Document{} + err = buff.DecodeMessage(doc) + if err == io.ErrUnexpectedEOF { + f.logger.Debug("reached the EOF", zap.Error(err)) + break + } + if err != nil { + f.logger.Error("failed to read document", zap.Error(err)) + return err + } + + var fields map[string]interface{} + if err := json.Unmarshal(doc.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal fields bytes to map", zap.Error(err)) + continue + } + + // apply item to store + if err = f.index.Index(doc.Id, fields); err != nil { + f.logger.Error("failed to index document", zap.Error(err)) + continue + } + + f.logger.Debug("document restored", zap.String("id", doc.Id)) + count = count + 1 + } + + f.logger.Info("finished to restore items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +// --------------------- + +type KVSFSMSnapshot struct { + index *storage.Index + logger *zap.Logger +} + +func (f *KVSFSMSnapshot) Persist(sink raft.SnapshotSink) error { + start := time.Now() + + f.logger.Info("start to persist items") + + defer 
func() { + if err := sink.Close(); err != nil { + f.logger.Error("failed to close sink", zap.Error(err)) + } + }() + + ch := f.index.SnapshotItems() + + count := uint64(0) + + for { + doc := <-ch + if doc == nil { + f.logger.Debug("channel closed") + break + } + + count = count + 1 + + buff := proto.NewBuffer([]byte{}) + if err := buff.EncodeMessage(doc); err != nil { + f.logger.Error("failed to encode document", zap.Error(err)) + return err + } + + if _, err := sink.Write(buff.Bytes()); err != nil { + f.logger.Error("failed to write document", zap.Error(err)) + return err + } + } + + f.logger.Info("finished to persist items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +func (f *KVSFSMSnapshot) Release() { + f.logger.Info("release") +} diff --git a/server/raft_fsm_test.go b/server/raft_fsm_test.go new file mode 100644 index 0000000..865f623 --- /dev/null +++ b/server/raft_fsm_test.go @@ -0,0 +1,743 @@ +package server + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftFSM_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to 
create index") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if ret := fsm.delete(id); ret != nil { + t.Fatal("failed to delete document") + } + + f, err = fsm.get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_SetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_GetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := 
mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_DeleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to set metadata") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if ret := fsm.deleteMetadata(id); ret != nil { + t.Fatal("failed to delete metadata") + } + + m = fsm.getMetadata(id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := 
fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyJoin(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + data := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + event := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: dataAny, + } + + eventData, err := proto.Marshal(event) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + raftLog := &raft.Log{ + Data: eventData, + } + + ret := fsm.Apply(raftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(data.Id) + if data.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", data.Metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyLeave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 
30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + // apply join + setData := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + joinEvent := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: setDataAny, + } + + joinEventData, err := proto.Marshal(joinEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + joinRaftLog := &raft.Log{ + Data: joinEventData, + } + + ret := fsm.Apply(joinRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(setData.Id) + if setData.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", setData.Metadata.GrpcAddress, m.GrpcAddress) + } + + // apply leave + deleteData := &protobuf.DeleteMetadataRequest{ + Id: "node1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + leaveEvent := &protobuf.Event{ + Type: protobuf.Event_Leave, + Data: deleteDataAny, + } + + leaveEventData, err := proto.Marshal(leaveEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + leaveRaftLog := &raft.Log{ + Data: leaveEventData, + } + + ret = fsm.Apply(leaveRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m = fsm.getMetadata(deleteData.Id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplySet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir 
:= util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + 
t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + // apply delete + deleteData := &protobuf.DeleteRequest{ + Id: "1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + deleteEvent := &protobuf.Event{ + Type: protobuf.Event_Delete, + Data: deleteDataAny, + } + + deleteEventData, err := proto.Marshal(deleteEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + deleteRaftLog := &raft.Log{ + Data: deleteEventData, + } + + ret = fsm.Apply(deleteRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err = fsm.get(deleteData.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git 
a/server/raft_server.go b/server/raft_server.go new file mode 100644 index 0000000..5232081 --- /dev/null +++ b/server/raft_server.go @@ -0,0 +1,857 @@ +package server + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "time" + + raftbadgerdb "github.com/bbva/raft-badger" + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/mapping" + "github.com/dgraph-io/badger/v2" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type RaftServer struct { + id string + raftAddress string + dataDirectory string + bootstrap bool + logger *zap.Logger + + fsm *RaftFSM + + transport *raft.NetworkTransport + raft *raft.Raft + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} + + applyCh chan *protobuf.Event +} + +func NewRaftServer(id string, raftAddress string, dataDirectory string, indexMapping *mapping.IndexMappingImpl, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { + indexPath := filepath.Join(dataDirectory, "index") + fsm, err := NewRaftFSM(indexPath, indexMapping, logger) + if err != nil { + logger.Error("failed to create FSM", zap.String("index_path", indexPath), zap.Error(err)) + return nil, err + } + + return &RaftServer{ + id: id, + raftAddress: raftAddress, + dataDirectory: dataDirectory, + bootstrap: bootstrap, + fsm: fsm, + logger: logger, + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (s *RaftServer) Start() error { + config := raft.DefaultConfig() + config.LocalID = raft.ServerID(s.id) + config.SnapshotThreshold = 1024 + config.LogOutput = ioutil.Discard + + addr, err := net.ResolveTCPAddr("tcp", s.raftAddress) + if err != nil { + 
s.logger.Error("failed to resolve TCP address", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + s.transport, err = raft.NewTCPTransport(s.raftAddress, addr, 3, 10*time.Second, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create TCP transport", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + // create snapshot store + snapshotStore, err := raft.NewFileSnapshotStore(s.dataDirectory, 2, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create file snapshot store", zap.String("path", s.dataDirectory), zap.Error(err)) + return err + } + + logStorePath := filepath.Join(s.dataDirectory, "raft", "log") + err = os.MkdirAll(logStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + logStoreBadgerOpts := badger.DefaultOptions(logStorePath) + logStoreBadgerOpts.ValueDir = logStorePath + logStoreBadgerOpts.SyncWrites = false + logStoreBadgerOpts.Logger = nil + logStoreOpts := raftbadgerdb.Options{ + Path: logStorePath, + BadgerOptions: &logStoreBadgerOpts, + } + raftLogStore, err := raftbadgerdb.New(logStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + stableStorePath := filepath.Join(s.dataDirectory, "raft", "stable") + err = os.MkdirAll(stableStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + stableStoreBadgerOpts := badger.DefaultOptions(stableStorePath) + stableStoreBadgerOpts.ValueDir = stableStorePath + stableStoreBadgerOpts.SyncWrites = false + stableStoreBadgerOpts.Logger = nil + stableStoreOpts := raftbadgerdb.Options{ + Path: stableStorePath, + BadgerOptions: &stableStoreBadgerOpts, + } + raftStableStore, err := raftbadgerdb.New(stableStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + // create raft + s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftStableStore, snapshotStore, s.transport) + if err != nil { + s.logger.Error("failed to create raft", 
zap.Any("config", config), zap.Error(err)) + return err + } + + if s.bootstrap { + configuration := raft.Configuration{ + Servers: []raft.Server{ + { + ID: config.LocalID, + Address: s.transport.LocalAddr(), + }, + }, + } + s.raft.BootstrapCluster(configuration) + } + + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("Raft server started", zap.String("raft_address", s.raftAddress)) + return nil +} + +func (s *RaftServer) Stop() error { + s.applyCh <- nil + s.logger.Info("apply channel has closed") + + s.stopWatchCluster() + + if err := s.fsm.Close(); err != nil { + s.logger.Error("failed to close FSM", zap.Error(err)) + } + s.logger.Info("Raft FSM Closed") + + if future := s.raft.Shutdown(); future.Error() != nil { + s.logger.Info("failed to shutdown Raft", zap.Error(future.Error())) + } + s.logger.Info("Raft has shutdown", zap.String("raft_address", s.raftAddress)) + + return nil +} + +func (s *RaftServer) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.WaitForDetectLeader(timeout); err != nil { + if err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case <-s.raft.LeaderCh(): + s.logger.Info("became a leader", zap.String("leaderAddr", string(s.raft.Leader()))) + case event := <-s.fsm.applyCh: + s.applyCh <- event + case <-ticker.C: + raftStats := s.raft.Stats() + + switch raftStats["state"] { + case "Follower": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Follower)) + case "Candidate": + 
metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Candidate)) + case "Leader": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Leader)) + case "Shutdown": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Shutdown)) + } + + if term, err := strconv.ParseFloat(raftStats["term"], 64); err == nil { + metric.RaftTermMetric.WithLabelValues(s.id).Set(term) + } + + if lastLogIndex, err := strconv.ParseFloat(raftStats["last_log_index"], 64); err == nil { + metric.RaftLastLogIndexMetric.WithLabelValues(s.id).Set(lastLogIndex) + } + + if lastLogTerm, err := strconv.ParseFloat(raftStats["last_log_term"], 64); err == nil { + metric.RaftLastLogTermMetric.WithLabelValues(s.id).Set(lastLogTerm) + } + + if commitIndex, err := strconv.ParseFloat(raftStats["commit_index"], 64); err == nil { + metric.RaftCommitIndexMetric.WithLabelValues(s.id).Set(commitIndex) + } + + if appliedIndex, err := strconv.ParseFloat(raftStats["applied_index"], 64); err == nil { + metric.RaftAppliedIndexMetric.WithLabelValues(s.id).Set(appliedIndex) + } + + if fsmPending, err := strconv.ParseFloat(raftStats["fsm_pending"], 64); err == nil { + metric.RaftFsmPendingMetric.WithLabelValues(s.id).Set(fsmPending) + } + + if lastSnapshotIndex, err := strconv.ParseFloat(raftStats["last_snapshot_index"], 64); err == nil { + metric.RaftLastSnapshotIndexMetric.WithLabelValues(s.id).Set(lastSnapshotIndex) + } + + if lastSnapshotTerm, err := strconv.ParseFloat(raftStats["last_snapshot_term"], 64); err == nil { + metric.RaftLastSnapshotTermMetric.WithLabelValues(s.id).Set(lastSnapshotTerm) + } + + if latestConfigurationIndex, err := strconv.ParseFloat(raftStats["latest_configuration_index"], 64); err == nil { + metric.RaftLatestConfigurationIndexMetric.WithLabelValues(s.id).Set(latestConfigurationIndex) + } + + if numPeers, err := strconv.ParseFloat(raftStats["num_peers"], 64); err == nil { + metric.RaftNumPeersMetric.WithLabelValues(s.id).Set(numPeers) + } + + if lastContact, err 
:= strconv.ParseFloat(raftStats["last_contact"], 64); err == nil { + metric.RaftLastContactMetric.WithLabelValues(s.id).Set(lastContact) + } + + if nodes, err := s.Nodes(); err == nil { + metric.RaftNumNodesMetric.WithLabelValues(s.id).Set(float64(len(nodes))) + } + + indexStats := s.fsm.Stats() + + tmpIndex := indexStats["index"].(map[string]interface{}) + + metric.IndexCurOnDiskBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskBytes"].(uint64))) + + metric.IndexCurOnDiskFilesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskFiles"].(uint64))) + + metric.IndexCurRootEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurRootEpoch"].(uint64))) + + metric.IndexLastMergedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastMergedEpoch"].(uint64))) + + metric.IndexLastPersistedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastPersistedEpoch"].(uint64))) + + metric.IndexMaxBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxBatchIntroTime"].(uint64))) + + metric.IndexMaxFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxFileMergeZapTime"].(uint64))) + + metric.IndexMaxMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxMemMergeZapTime"].(uint64))) + + metric.IndexTotAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotAnalysisTime"].(uint64))) + + metric.IndexTotBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchIntroTime"].(uint64))) + + metric.IndexTotBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatches"].(uint64))) + + metric.IndexTotBatchesEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchesEmpty"].(uint64))) + + metric.IndexTotDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotDeletes"].(uint64))) + + metric.IndexTotFileMergeIntroductionsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductions"].(uint64))) + + 
metric.IndexTotFileMergeIntroductionsDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsDone"].(uint64))) + + metric.IndexTotFileMergeIntroductionsSkippedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsSkipped"].(uint64))) + + metric.IndexTotFileMergeLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopBeg"].(uint64))) + + metric.IndexTotFileMergeLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopEnd"].(uint64))) + + metric.IndexTotFileMergeLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopErr"].(uint64))) + + metric.IndexTotFileMergePlanMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlan"].(uint64))) + + metric.IndexTotFileMergePlanErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanErr"].(uint64))) + + metric.IndexTotFileMergePlanNoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanNone"].(uint64))) + + metric.IndexTotFileMergePlanOkMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanOk"].(uint64))) + + metric.IndexTotFileMergePlanTasksMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasks"].(uint64))) + + metric.IndexTotFileMergePlanTasksDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksDone"].(uint64))) + + metric.IndexTotFileMergePlanTasksErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksErr"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegments"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegments"].(uint64))) + + 
metric.IndexTotFileMergeSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeWrittenBytes"].(uint64))) + + metric.IndexTotFileMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapBeg"].(uint64))) + + metric.IndexTotFileMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapEnd"].(uint64))) + + metric.IndexTotFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapTime"].(uint64))) + + metric.IndexTotFileSegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileSegmentsAtRoot"].(uint64))) + + metric.IndexTotIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexTime"].(uint64))) + + metric.IndexTotIndexedPlainTextBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexedPlainTextBytes"].(uint64))) + + metric.IndexTotIntroduceLoopMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceLoop"].(uint64))) + + metric.IndexTotIntroduceMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeBeg"].(uint64))) + + metric.IndexTotIntroduceMergeEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeEnd"].(uint64))) + + metric.IndexTotIntroducePersistBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistBeg"].(uint64))) + + metric.IndexTotIntroducePersistEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistEnd"].(uint64))) + + metric.IndexTotIntroduceRevertBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertBeg"].(uint64))) + + metric.IndexTotIntroduceRevertEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertEnd"].(uint64))) + + metric.IndexTotIntroduceSegmentBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentBeg"].(uint64))) + + 
metric.IndexTotIntroduceSegmentEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentEnd"].(uint64))) + + metric.IndexTotIntroducedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedItems"].(uint64))) + + metric.IndexTotIntroducedSegmentsBatchMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsBatch"].(uint64))) + + metric.IndexTotIntroducedSegmentsMergeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsMerge"].(uint64))) + + metric.IndexTotItemsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotItemsToPersist"].(uint64))) + + metric.IndexTotMemMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeBeg"].(uint64))) + + metric.IndexTotMemMergeDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeDone"].(uint64))) + + metric.IndexTotMemMergeErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeErr"].(uint64))) + + metric.IndexTotMemMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeSegments"].(uint64))) + + metric.IndexTotMemMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapBeg"].(uint64))) + + metric.IndexTotMemMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapEnd"].(uint64))) + + metric.IndexTotMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapTime"].(uint64))) + + metric.IndexTotMemorySegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemorySegmentsAtRoot"].(uint64))) + + metric.IndexTotOnErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotOnErrors"].(uint64))) + + metric.IndexTotPersistLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopBeg"].(uint64))) + + metric.IndexTotPersistLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopEnd"].(uint64))) + + metric.IndexTotPersistLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopErr"].(uint64))) + + 
metric.IndexTotPersistLoopProgressMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopProgress"].(uint64))) + + metric.IndexTotPersistLoopWaitMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWait"].(uint64))) + + metric.IndexTotPersistLoopWaitNotifiedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWaitNotified"].(uint64))) + + metric.IndexTotPersistedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedItems"].(uint64))) + + metric.IndexTotPersistedSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedSegments"].(uint64))) + + metric.IndexTotPersisterMergerNapBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterMergerNapBreak"].(uint64))) + + metric.IndexTotPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterNapPauseCompleted"].(uint64))) + + metric.IndexTotPersisterSlowMergerPauseMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerPause"].(uint64))) + + metric.IndexTotPersisterSlowMergerResumeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerResume"].(uint64))) + + metric.IndexTotTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersFinished"].(uint64))) + + metric.IndexTotTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersStarted"].(uint64))) + + metric.IndexTotUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotUpdates"].(uint64))) + + metric.IndexAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["analysis_time"].(uint64))) + + metric.IndexBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["batches"].(uint64))) + + metric.IndexDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["deletes"].(uint64))) + + metric.IndexErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["errors"].(uint64))) + + metric.IndexIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["index_time"].(uint64))) + 
+ metric.IndexNumBytesUsedDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_bytes_used_disk"].(uint64))) + + metric.IndexNumFilesOnDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_files_on_disk"].(uint64))) + + metric.IndexNumItemsIntroducedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_introduced"].(uint64))) + + metric.IndexNumItemsPersistedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_persisted"].(uint64))) + + metric.IndexNumPersisterNapMergerBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_merger_break"].(uint64))) + + metric.IndexNumPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_pause_completed"].(uint64))) + + metric.IndexNumPlainTextBytesIndexedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_plain_text_bytes_indexed"].(uint64))) + + metric.IndexNumRecsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_recs_to_persist"].(uint64))) + + metric.IndexNumRootFilesegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_filesegments"].(uint64))) + + metric.IndexNumRootMemorysegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_memorysegments"].(uint64))) + + metric.IndexTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_finished"].(uint64))) + + metric.IndexTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_started"].(uint64))) + + metric.IndexTotalCompactionWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["total_compaction_written_bytes"].(uint64))) + + metric.IndexUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["updates"].(uint64))) + + metric.SearchTimeMetric.WithLabelValues(s.id).Set(float64(indexStats["search_time"].(uint64))) + + metric.SearchesMetric.WithLabelValues(s.id).Set(float64(indexStats["searches"].(uint64))) + } + } +} + +func (s *RaftServer) stopWatchCluster() { + if s.watchClusterStopCh != nil 
{ + s.logger.Info("send a request to stop updating a cluster") + close(s.watchClusterStopCh) + } + + s.logger.Info("wait for the cluster update to stop") + <-s.watchClusterDoneCh + s.logger.Info("the cluster update has been stopped") +} + +func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case <-ticker.C: + leaderAddr := s.raft.Leader() + if leaderAddr != "" { + s.logger.Debug("detected a leader address", zap.String("raft_address", string(leaderAddr))) + return leaderAddr, nil + } + case <-timer.C: + err := errors.ErrTimeout + s.logger.Error("failed to detect leader address", zap.Error(err)) + return "", err + } + } +} + +func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { + leaderAddr, err := s.LeaderAddress(timeout) + if err != nil { + s.logger.Error("failed to get leader address", zap.Error(err)) + return "", err + } + + cf := s.raft.GetConfiguration() + if err = cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return "", err + } + + for _, server := range cf.Configuration().Servers { + if server.Address == leaderAddr { + s.logger.Info("detected a leader ID", zap.String("id", string(server.ID))) + return server.ID, nil + } + } + + err = errors.ErrNotFoundLeader + s.logger.Error("failed to detect leader ID", zap.Error(err)) + return "", err +} + +func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { + if _, err := s.LeaderAddress(timeout); err != nil { + s.logger.Error("failed to wait for detect leader", zap.Error(err)) + return err + } + + return nil +} + +func (s *RaftServer) State() raft.RaftState { + return s.raft.State() +} + +func (s *RaftServer) StateStr() string { + return s.State().String() +} + +func (s *RaftServer) Exist(id string) (bool, error) { + exist := false + + cf := 
s.raft.GetConfiguration()
	if err := cf.Error(); err != nil {
		s.logger.Error("failed to get Raft configuration", zap.Error(err))
		return false, err
	}

	for _, srv := range cf.Configuration().Servers {
		if srv.ID != raft.ServerID(id) {
			continue
		}
		s.logger.Debug("node already joined the cluster", zap.String("id", id))
		exist = true
		break
	}

	return exist, nil
}

// getMetadata returns the metadata stored in the FSM for the given node ID,
// or errors.ErrNotFound when the node is unknown.
func (s *RaftServer) getMetadata(id string) (*protobuf.Metadata, error) {
	if metadata := s.fsm.getMetadata(id); metadata != nil {
		return metadata, nil
	}

	return nil, errors.ErrNotFound
}

// setMetadata replicates the node metadata through the Raft log as a Join
// event so every member's FSM learns about it.
func (s *RaftServer) setMetadata(id string, metadata *protobuf.Metadata) error {
	req := &protobuf.SetMetadataRequest{
		Id:       id,
		Metadata: metadata,
	}

	reqAny := &any.Any{}
	if err := marshaler.UnmarshalAny(req, reqAny); err != nil {
		s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err))
		return err
	}

	msg, err := proto.Marshal(&protobuf.Event{
		Type: protobuf.Event_Join,
		Data: reqAny,
	})
	if err != nil {
		s.logger.Error("failed to marshal the command into the bytes as message", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err))
		return err
	}

	timeout := 60 * time.Second
	if future := s.raft.Apply(msg, timeout); future.Error() != nil {
		s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error()))
		return future.Error()
	}

	return nil
}

// deleteMetadata replicates removal of the node metadata through the Raft
// log as a Leave event.
func (s *RaftServer) deleteMetadata(id string) error {
	req := &protobuf.DeleteMetadataRequest{
		Id: id,
	}

	reqAny := &any.Any{}
	if err := marshaler.UnmarshalAny(req, reqAny); err != nil {
		s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Error(err))
		return err
	}

	event := &protobuf.Event{
		Type: protobuf.Event_Leave,
		Data: reqAny,
	}

	msg, err := proto.Marshal(event)
	if err != nil
{
		s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("id", id), zap.Error(err))
		return err
	}

	timeout := 60 * time.Second
	if future := s.raft.Apply(msg, timeout); future.Error() != nil {
		s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error()))
		return future.Error()
	}

	return nil
}

// Join adds the given node to the cluster as a voter and replicates its
// metadata. When the node is already a member only the metadata is refreshed
// and errors.ErrNodeAlreadyExists is returned so callers can tell.
func (s *RaftServer) Join(id string, node *protobuf.Node) error {
	exist, err := s.Exist(id)
	if err != nil {
		return err
	}

	if !exist {
		if future := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(node.RaftAddress), 0, 0); future.Error() != nil {
			s.logger.Error("failed to add voter", zap.String("id", id), zap.String("raft_address", node.RaftAddress), zap.Error(future.Error()))
			return future.Error()
		}
		s.logger.Info("node has successfully joined", zap.String("id", id), zap.String("raft_address", node.RaftAddress))
	}

	if err := s.setMetadata(id, node.Metadata); err != nil {
		return err
	}

	if exist {
		// Signal (after the metadata refresh) that the voter was already a member.
		return errors.ErrNodeAlreadyExists
	}

	return nil
}

// Leave removes the given node from the cluster and deletes its metadata.
// When the node is not a member, the metadata is still cleaned up and
// errors.ErrNodeDoesNotExist is returned.
func (s *RaftServer) Leave(id string) error {
	exist, err := s.Exist(id)
	if err != nil {
		return err
	}

	if exist {
		if future := s.raft.RemoveServer(raft.ServerID(id), 0, 0); future.Error() != nil {
			s.logger.Error("failed to remove server", zap.String("id", id), zap.Error(future.Error()))
			return future.Error()
		}
		s.logger.Info("node has successfully left", zap.String("id", id))
	}

	if err = s.deleteMetadata(id); err != nil {
		return err
	}

	if !exist {
		return errors.ErrNodeDoesNotExist
	}

	return nil
}

// Node returns this node's own cluster entry, annotated with the current
// Raft state string.
func (s *RaftServer) Node() (*protobuf.Node, error) {
	nodes, err := s.Nodes()
	if err != nil {
		return nil, err
	}

	node, ok := nodes[s.id]
	if !ok {
		return nil, errors.ErrNotFound
	}

	node.State = s.StateStr()

	return node, nil
}

// Nodes returns the cluster members keyed by node ID.
func (s *RaftServer) Nodes() (map[string]*protobuf.Node, error) {
	cf := s.raft.GetConfiguration()
	if
err := cf.Error(); err != nil {
		s.logger.Error("failed to get Raft configuration", zap.Error(err))
		return nil, err
	}

	// Pre-size the map to the number of configured servers instead of the
	// previous misleading zero capacity hint.
	servers := cf.Configuration().Servers
	nodes := make(map[string]*protobuf.Node, len(servers))
	for _, srv := range servers {
		// Metadata may legitimately be missing for a node; the lookup error
		// is intentionally ignored and the entry keeps a nil Metadata.
		metadata, _ := s.getMetadata(string(srv.ID))

		nodes[string(srv.ID)] = &protobuf.Node{
			RaftAddress: string(srv.Address),
			Metadata:    metadata,
		}
	}

	return nodes, nil
}

// Snapshot forces the Raft layer to take a snapshot now.
func (s *RaftServer) Snapshot() error {
	if future := s.raft.Snapshot(); future.Error() != nil {
		s.logger.Error("failed to snapshot", zap.Error(future.Error()))
		return future.Error()
	}

	return nil
}

// Get returns the document with the given ID from the FSM.
func (s *RaftServer) Get(id string) (map[string]interface{}, error) {
	return s.fsm.get(id)
}

// Search runs the given Bleve search request against the FSM's index.
func (s *RaftServer) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) {
	return s.fsm.search(searchRequest)
}

// Set replicates an index (upsert) request through the Raft log.
func (s *RaftServer) Set(req *protobuf.SetRequest) error {
	dataAny := &any.Any{}
	if err := marshaler.UnmarshalAny(req, dataAny); err != nil {
		s.logger.Error("failed to unmarshal document map to any", zap.Error(err))
		return err
	}

	event := &protobuf.Event{
		Type: protobuf.Event_Set,
		Data: dataAny,
	}

	msg, err := proto.Marshal(event)
	if err != nil {
		s.logger.Error("failed to marshal event to bytes", zap.Error(err))
		return err
	}

	timeout := 60 * time.Second
	if future := s.raft.Apply(msg, timeout); future.Error() != nil {
		s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error()))
		return future.Error()
	}

	return nil
}

// Delete replicates a delete request through the Raft log.
func (s *RaftServer) Delete(req *protobuf.DeleteRequest) error {
	dataAny := &any.Any{}
	if err := marshaler.UnmarshalAny(req, dataAny); err != nil {
		s.logger.Error("failed to unmarshal id to any", zap.Error(err))
		return err
	}

	c := &protobuf.Event{
		Type: protobuf.Event_Delete,
		Data: dataAny,
	}

	msg, err := proto.Marshal(c)
	if err != nil {
		s.logger.Error("failed to marshal event to 
bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkIndex(req *protobuf.BulkIndexRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal bulk index request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkIndex, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkDelete(req *protobuf.BulkDeleteRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal set request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkDelete, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Mapping() (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + m := s.fsm.Mapping() + + fieldsBytes, err := json.Marshal(m) + if err != nil { + s.logger.Error("failed to 
marshal mapping to bytes", zap.Error(err)) + return resp, err + } + + resp.Mapping = fieldsBytes + + return resp, nil +} diff --git a/server/raft_server_test.go b/server/raft_server_test.go new file mode 100644 index 0000000..11a1b65 --- /dev/null +++ b/server/raft_server_test.go @@ -0,0 +1,1536 @@ +package server + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftServer_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + time.Sleep(10 * time.Second) +} + +func Test_RaftServer_LeaderAddress(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + 
+ if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderAddress, err := server.LeaderAddress(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != string(leaderAddress) { + t.Fatalf("expected content to see %v, saw %v", raftAddress, string(leaderAddress)) + } +} + +func Test_RaftServer_LeaderID(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderId, err := server.LeaderID(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if id != string(leaderId) { + t.Fatalf("expected content to see %v, saw %v", id, string(leaderId)) + } +} + +func Test_RaftServer_WaitForDetectLeader(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + 
if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_State(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.State() + if raft.Leader != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader, state) + } +} + +func Test_RaftServer_StateStr(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.StateStr() + if raft.Leader.String() != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), state) + } +} + +func Test_RaftServer_Exist(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + exist, err := server.Exist(id) + if err != nil { + t.Fatalf("%v", err) + } + if !exist { + t.Fatalf("expected content to see %v, saw %v", true, exist) + } + + exist, err = server.Exist("non-existent-id") + if err != nil { + t.Fatalf("%v", err) + } + if exist { + t.Fatalf("expected content to see %v, saw %v", false, exist) + } +} + +func Test_RaftServer_setMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + 
t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_getMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, 
m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } +} + +func Test_RaftServer_deleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + // set + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + // get + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } + + // delete + if err := server.deleteMetadata(id); err != nil { + t.Fatalf("%v", err) + } + + //get + m, err = server.getMetadata(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatalf("%v", err) + } + } + if err == nil { + t.Fatalf("expected content to see %v, saw %v", nil, err) + } +} + +func Test_RaftServer_Join(t 
*testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } +} + +func Test_RaftServer_Node(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + 
t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + n, err := server.Node() + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != n.RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, n.RaftAddress) + } + if grpcAddress != n.Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, n.Metadata.GrpcAddress) + } + if httpAddress != n.Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, n.Metadata.HttpAddress) + } +} + +func Test_RaftServer_Cluster(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: 
raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := 
server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + 
if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + 
if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } +} + +func Test_RaftServer_Leave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if 
err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != 
ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, 
ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } + + if err := server.Leave(id3); err != nil { + t.Fatalf("%v", err) + } + + ns, err = server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if _, ok := ns[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } + + time.Sleep(3 * time.Second) + + ns2, err = server2.Nodes() + if 
err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if _, ok := ns2[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } +} + +func Test_RaftServer_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": 
"Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } +} + +func Test_RaftServer_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a 
computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } + + deleteReq1 := &protobuf.DeleteRequest{ + Id: docId1, + } + + if err := server.Delete(deleteReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err = server.Get(docId1) + if err != nil { + switch err { + case errors.ErrNotFound: + //ok + default: + t.Fatalf("%v", err) + } + } + if f1 != nil { + t.Fatalf("expected content to see %v, saw %v", nil, f1) + } +} + +func Test_RaftServer_Snapshot(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if 
err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + if err := server.Snapshot(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/sortutils/sort.go b/sortutils/sort.go deleted file mode 100644 index 9f41b7f..0000000 --- a/sortutils/sort.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sortutils - -import ( - "github.com/blevesearch/bleve/search" -) - -type MultiSearchHitSorter struct { - hits search.DocumentMatchCollection - sort search.SortOrder - cachedScoring []bool - cachedDesc []bool -} - -func NewMultiSearchHitSorter(sort search.SortOrder, hits search.DocumentMatchCollection) *MultiSearchHitSorter { - return &MultiSearchHitSorter{ - sort: sort, - hits: hits, - cachedScoring: sort.CacheIsScore(), - cachedDesc: sort.CacheDescending(), - } -} - -func (m *MultiSearchHitSorter) Len() int { - return len(m.hits) -} - -func (m *MultiSearchHitSorter) Swap(i, j int) { - m.hits[i], m.hits[j] = m.hits[j], m.hits[i] -} - -func (m *MultiSearchHitSorter) Less(i, j int) bool { - c := m.sort.Compare(m.cachedScoring, m.cachedDesc, m.hits[i], m.hits[j]) - - return c < 0 -} diff --git a/storage/index.go b/storage/index.go new file mode 100644 index 0000000..26ba05d --- /dev/null +++ b/storage/index.go @@ -0,0 +1,269 @@ +package storage + +import ( + "os" + "time" + + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/index/scorch" + "github.com/blevesearch/bleve/v2/mapping" + bleveindex "github.com/blevesearch/bleve_index_api" + _ "github.com/mosuka/blast/builtin" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type Index struct { + indexMapping *mapping.IndexMappingImpl + logger *zap.Logger + + index bleve.Index +} + +func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*Index, error) { + var index bleve.Index + + if _, err := os.Stat(dir); os.IsNotExist(err) { + // create new index + index, err = bleve.NewUsing(dir, indexMapping, scorch.Name, scorch.Name, nil) + if err != nil { + logger.Error("failed to create index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } else { + // open existing index + index, err = bleve.OpenUsing(dir, map[string]interface{}{ + "create_if_missing": false, + "error_if_exists": false, + }) + if err != 
nil { + logger.Error("failed to open index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } + + return &Index{ + index: index, + indexMapping: indexMapping, + logger: logger, + }, nil +} + +func (i *Index) Close() error { + if err := i.index.Close(); err != nil { + i.logger.Error("failed to close index", zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) Get(id string) (map[string]interface{}, error) { + doc, err := i.index.Document(id) + if err != nil { + i.logger.Error("failed to get document", zap.String("id", id), zap.Error(err)) + return nil, err + } + if doc == nil { + err := errors.ErrNotFound + i.logger.Debug("document does not found", zap.String("id", id), zap.Error(err)) + return nil, err + } + + fields := make(map[string]interface{}, 0) + doc.VisitFields(func(field bleveindex.Field) { + var v interface{} + switch field := field.(type) { + case bleveindex.TextField: + v = field.Text() + case bleveindex.NumericField: + n, err := field.Number() + if err == nil { + v = n + } + case bleveindex.DateTimeField: + d, err := field.DateTime() + if err == nil { + v = d.Format(time.RFC3339Nano) + } + } + existing, existed := fields[field.Name()] + if existed { + switch existing := existing.(type) { + case []interface{}: + fields[field.Name()] = append(existing, v) + case interface{}: + arr := make([]interface{}, 2) + arr[0] = existing + arr[1] = v + fields[field.Name()] = arr + } + } else { + fields[field.Name()] = v + } + }) + + return fields, nil +} + +func (i *Index) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + searchResult, err := i.index.Search(searchRequest) + if err != nil { + i.logger.Error("failed to search documents", zap.Any("search_request", searchRequest), zap.Error(err)) + return nil, err + } + + return searchResult, nil +} + +func (i *Index) Index(id string, fields map[string]interface{}) error { + if err := i.index.Index(id, fields); err != nil { + i.logger.Error("failed to index 
document", zap.String("id", id), zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) Delete(id string) error { + if err := i.index.Delete(id); err != nil { + i.logger.Error("failed to delete document", zap.String("id", id), zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) BulkIndex(docs []map[string]interface{}) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, doc := range docs { + id, ok := doc["id"].(string) + if !ok { + err := errors.ErrNil + i.logger.Error("missing id", zap.Error(err)) + continue + } + fields, ok := doc["fields"].(map[string]interface{}) + if !ok { + err := errors.ErrNil + i.logger.Error("missing fields", zap.Error(err)) + continue + } + + if err := batch.Index(id, fields); err != nil { + i.logger.Error("failed to index document in batch", zap.String("id", id), zap.Error(err)) + continue + } + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to index documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + if count <= 0 { + err := errors.ErrNoUpdate + i.logger.Error("no documents updated", zap.Any("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) BulkDelete(ids []string) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, id := range ids { + batch.Delete(id) + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to delete documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) Mapping() *mapping.IndexMappingImpl { + return i.indexMapping +} + +func (i *Index) Stats() map[string]interface{} { + return i.index.StatsMap() +} + +func (i *Index) SnapshotItems() <-chan *protobuf.Document { + ch := make(chan *protobuf.Document, 1024) + + go func() { + idx, err := i.index.Advanced() + if err != nil { + i.logger.Error("failed to get index", zap.Error(err)) + return + 
} + + ir, err := idx.Reader() + if err != nil { + i.logger.Error("failed to get index reader", zap.Error(err)) + return + } + + docCount := 0 + + dr, err := ir.DocIDReaderAll() + if err != nil { + i.logger.Error("failed to get doc ID reader", zap.Error(err)) + return + } + for { + //if dr == nil { + // i.logger.Error(err.Error()) + // break + //} + id, err := dr.Next() + if id == nil { + i.logger.Debug("finished to read all document IDs") + break + } else if err != nil { + i.logger.Warn("failed to get doc ID", zap.Error(err)) + continue + } + + // get original document + fieldsBytes, err := i.index.GetInternal(id) + if err != nil { + i.logger.Warn("failed to get doc fields bytes", zap.String("id", string(id)), zap.Error(err)) + continue + } + + doc := &protobuf.Document{ + Id: string(id), + Fields: fieldsBytes, + } + + ch <- doc + + docCount = docCount + 1 + } + + i.logger.Debug("finished to write all documents to channel") + ch <- nil + + i.logger.Info("finished to snapshot", zap.Int("count", docCount)) + + return + }() + + return ch +} diff --git a/storage/index_test.go b/storage/index_test.go new file mode 100644 index 0000000..72bd723 --- /dev/null +++ b/storage/index_test.go @@ -0,0 +1,341 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func TestClose(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + 
if index == nil { + t.Fatal("failed to create index") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestGet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + f, err := index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + fields, err = index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + + if err := index.Delete(id); err != nil { + t.Fatal("failed to delete document") + } + + fields, err = index.Get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. %d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. 
%d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + ids := make([]string, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + + ids = append(ids, id) + } + + count, err = index.BulkDelete(ids) + if err != nil { + t.Fatal("failed to delete documents") + } + if count <= 0 { + t.Fatal("failed to delete documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/strutils/strutils.go b/strutils/strutils.go deleted file mode 100644 index 4ea086d..0000000 --- a/strutils/strutils.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package strutils - -import ( - "math/rand" - "time" -) - -var randSrc = rand.NewSource(time.Now().UnixNano()) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - letterIdxBits = 6 - letterIdxMask = 1<= 0; { - if remain == 0 { - cache, remain = randSrc.Int63(), letterIdxMax - } - idx := int(cache & letterIdxMask) - if idx < len(letters) { - b[i] = letters[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - - return string(b) -} diff --git a/testutils/testutils.go b/testutils/testutils.go deleted file mode 100644 index ecae708..0000000 --- a/testutils/testutils.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutils - -import ( - "fmt" - "io/ioutil" - "net" - - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/indexutils" -) - -func TmpDir() string { - tmp, _ := ioutil.TempDir("", "") - return tmp -} - -func TmpPort() int { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return -1 - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return -1 - } - - defer func() { - _ = l.Close() - }() - - return l.Addr().(*net.TCPAddr).Port -} - -func TmpNodeConfig() *config.NodeConfig { - c := config.DefaultNodeConfig() - - c.BindAddr = fmt.Sprintf(":%d", TmpPort()) - c.GRPCAddr = fmt.Sprintf(":%d", TmpPort()) - c.HTTPAddr = fmt.Sprintf(":%d", TmpPort()) - c.DataDir = TmpDir() - - return c -} - -func TmpIndexConfig(indexMappingFile string, indexType string, indexStorageType string) (*config.IndexConfig, error) { - indexMapping, err := indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return config.DefaultIndexConfig(), err - } - - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - return indexConfig, nil -} diff --git a/util/temp.go b/util/temp.go new file mode 100644 index 0000000..8f3208f --- /dev/null +++ b/util/temp.go @@ -0,0 +1,29 @@ +package util + +import ( + "io/ioutil" + "net" +) + +func TmpDir() string { + tmp, _ := ioutil.TempDir("", "") + return tmp +} + +func TmpPort() int { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return -1 + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return -1 + } + + defer func() { + _ = l.Close() + }() + + return l.Addr().(*net.TCPAddr).Port +} diff --git a/version/version.go b/version/version.go index 328268a..1895fc9 100644 --- a/version/version.go +++ b/version/version.go @@ -1,17 +1,3 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package version var (