From 49c1549745118eb0d9ab0cf4ac7957d4ccf2bc41 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 27 Jun 2019 15:26:54 +0900 Subject: [PATCH 1/9] Addd grpc_prometheus --- dispatcher/grpc_service.go | 4 - dispatcher/http_handler.go | 21 ++-- dispatcher/metric.go | 61 --------- go.mod | 6 + go.sum | 12 ++ grpc/server.go | 29 ++++- http/metric.go | 194 ++++++++++++++--------------- indexer/grpc_service.go | 26 ---- indexer/http_handler.go | 21 ++-- indexer/index.go | 4 + indexer/metric.go | 61 --------- manager/grpc_service.go | 8 -- manager/http_router.go | 17 ++- manager/metric.go | 61 --------- protobuf/blast.pb.go | 246 ++++++++++++++++++------------------- protobuf/blast.proto | 2 +- 16 files changed, 299 insertions(+), 474 deletions(-) delete mode 100644 dispatcher/metric.go delete mode 100644 indexer/metric.go delete mode 100644 manager/metric.go diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 18bd545..229b18c 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -439,9 +439,6 @@ func (s *GRPCService) getIndexerClients() map[string]*grpc.Client { } func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - start := time.Now() - defer RecordMetrics(start, "get") - indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -506,7 +503,6 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { start := time.Now() - defer RecordMetrics(start, "search") resp := &protobuf.SearchResponse{} diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index b03b56f..388d760 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -19,7 +19,6 @@ import ( "io/ioutil" "log" "net/http" - "time" "github.com/blevesearch/bleve" "github.com/gorilla/mux" @@ -61,12 +60,12 @@ func NewRootHandler(logger *log.Logger) *RootHandler { } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() status := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, status, h.logger) - blasthttp.RecordMetrics(start, status, w, r, h.logger) + //blasthttp.RecordMetrics(start, status, w, r, h.logger) }() msgMap := map[string]interface{}{ @@ -93,12 +92,12 @@ func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler } func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() vars := mux.Vars(r) @@ -157,12 +156,12 @@ func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandle } func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() // create documents @@ -287,12 +286,12 @@ func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) 
*DeleteHa } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() // create documents @@ -394,12 +393,12 @@ func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { } func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() searchRequestBytes, err := ioutil.ReadAll(r.Body) diff --git a/dispatcher/metric.go b/dispatcher/metric.go deleted file mode 100644 index adee6e0..0000000 --- a/dispatcher/metric.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "blast" - subsystem = "federator" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "duration_seconds", - Help: "The index operation durations in seconds.", - }, - []string{ - "func", - }, - ) - OperationsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "operations_total", - Help: "The number of index operations.", - }, - []string{ - "func", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(OperationsTotal) -} - -func RecordMetrics(start time.Time, funcName string) { - DurationSeconds.With(prometheus.Labels{"func": funcName}).Observe(float64(time.Since(start)) / float64(time.Second)) - OperationsTotal.With(prometheus.Labels{"func": funcName}).Inc() - - return -} diff --git a/go.mod b/go.mod index 9e95dfd..0debc24 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,8 @@ require ( github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/golang/protobuf v1.3.1 github.com/gorilla/mux v1.7.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/go-immutable-radix v1.0.0 // indirect github.com/hashicorp/go-msgpack v0.5.3 // indirect github.com/hashicorp/golang-lru v0.5.1 // indirect @@ -30,6 +32,7 @@ require ( github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 github.com/mosuka/logutils v0.1.2 + github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pascaldekloe/goe v0.1.0 // indirect github.com/prometheus/client_golang 
v0.9.2 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect @@ -41,6 +44,9 @@ require ( github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect github.com/urfave/cli v1.20.0 + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 // indirect golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect google.golang.org/grpc v1.19.1 diff --git a/go.sum b/go.sum index 535ee7a..2e15326 100644 --- a/go.sum +++ b/go.sum @@ -79,6 +79,10 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= @@ -125,6 +129,8 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= @@ -174,6 +180,12 @@ github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/grpc/server.go b/grpc/server.go index 9dbee30..308c57a 100644 --- a/grpc/server.go +++ b/grpc/server.go @@ -18,6 +18,13 @@ import ( "log" "net" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + //grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" + //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/mosuka/blast/protobuf" "google.golang.org/grpc" ) @@ -31,10 +38,30 @@ type Server struct { } func NewServer(grpcAddr string, service protobuf.BlastServer, logger *log.Logger) (*Server, error) { - server := grpc.NewServer() + server := grpc.NewServer( + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + //grpc_ctxtags.StreamServerInterceptor(), + //grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + //grpc_zap.StreamServerInterceptor(zapLogger), + //grpc_auth.StreamServerInterceptor(myAuthFunction), + //grpc_recovery.StreamServerInterceptor(), + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + //grpc_ctxtags.UnaryServerInterceptor(), + //grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + //grpc_zap.UnaryServerInterceptor(zapLogger), + //grpc_auth.UnaryServerInterceptor(myAuthFunction), + //grpc_recovery.UnaryServerInterceptor(), + )), + ) protobuf.RegisterBlastServer(server, service) + grpc_prometheus.EnableHandlingTimeHistogram() + grpc_prometheus.Register(server) + listener, err := net.Listen("tcp", grpcAddr) if err != nil { return nil, err diff --git a/http/metric.go b/http/metric.go index dbae656..4ca3bad 100644 --- a/http/metric.go +++ b/http/metric.go @@ -14,100 +14,100 @@ package http -import ( - "log" - "net/http" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "blast" - subsystem = "http" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "duration_seconds", - Help: "The invocation duration in seconds.", - }, - []string{ - "request_uri", - }, - ) - - RequestsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_total", - Help: "The number of requests.", - }, - []string{ - "request_uri", - "method", - }, - ) - - ResponsesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "responses_total", - Help: "The number of responses.", - }, - []string{ - "request_uri", - "status", - }, - ) - - RequestsBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_bytes_total", - Help: "A summary of the invocation requests bytes.", - }, - []string{ - "request_uri", - "method", - }, - ) - - ResponsesBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "responses_bytes_total", - Help: "A summary of the invocation responses bytes.", - }, - []string{ - "request_uri", - "method", - }, - ) -) - 
-func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(RequestsTotal) - prometheus.MustRegister(ResponsesTotal) - prometheus.MustRegister(RequestsBytesTotal) - prometheus.MustRegister(ResponsesBytesTotal) -} - -func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request, logger *log.Logger) { - DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) - RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Inc() - ResponsesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "status": strconv.Itoa(status)}).Inc() - RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Add(float64(request.ContentLength)) - contentLength, err := strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) - if err != nil { - logger.Printf("[ERR] Failed to parse content length: %v", err) - } - ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Add(contentLength) -} +//import ( +// "log" +// "net/http" +// "strconv" +// "time" +// +// "github.com/prometheus/client_golang/prometheus" +//) +// +//var ( +// namespace = "blast" +// subsystem = "http" +// +// DurationSeconds = prometheus.NewHistogramVec( +// prometheus.HistogramOpts{ +// Namespace: namespace, +// Subsystem: subsystem, +// Name: "duration_seconds", +// Help: "The invocation duration in seconds.", +// }, +// []string{ +// "request_uri", +// }, +// ) +// +// RequestsTotal = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: namespace, +// Subsystem: subsystem, +// Name: "requests_total", +// Help: "The number of requests.", +// }, +// []string{ +// "request_uri", +// "method", +// }, +// ) +// +// ResponsesTotal = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: namespace, +// Subsystem: subsystem, +// Name: "responses_total", +// Help: "The number of responses.", +// }, +// []string{ +// "request_uri", +// "status", +// }, +// ) +// +// RequestsBytesTotal = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: namespace, +// Subsystem: subsystem, +// Name: "requests_bytes_total", +// Help: "A summary of the invocation requests bytes.", +// }, +// []string{ +// "request_uri", +// "method", +// }, +// ) +// +// ResponsesBytesTotal = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Namespace: namespace, +// Subsystem: subsystem, +// Name: "responses_bytes_total", +// Help: "A summary of the invocation responses bytes.", +// }, +// []string{ +// "request_uri", +// "method", +// }, +// ) +//) +// +//func init() { +// prometheus.MustRegister(DurationSeconds) +// prometheus.MustRegister(RequestsTotal) +// prometheus.MustRegister(ResponsesTotal) +// prometheus.MustRegister(RequestsBytesTotal) +// prometheus.MustRegister(ResponsesBytesTotal) +//} +// +//func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request, logger *log.Logger) { +// DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) +// RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Inc() +// ResponsesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "status": strconv.Itoa(status)}).Inc() +// RequestsBytesTotal.With(prometheus.Labels{"request_uri": 
request.RequestURI, "method": request.Method}).Add(float64(request.ContentLength)) +// contentLength, err := strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) +// if err != nil { +// logger.Printf("[ERR] Failed to parse content length: %v", err) +// } +// ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Add(contentLength) +//} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 680709d..d3a69c5 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -656,8 +656,6 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch } func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - s.logger.Printf("[INFO] %v", req) - resp := &empty.Empty{} err := s.raftServer.Snapshot() @@ -669,11 +667,6 @@ func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Em } func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - start := time.Now() - defer RecordMetrics(start, "get") - - s.logger.Printf("[INFO] get %v", req) - resp := &protobuf.GetDocumentResponse{} fields, err := s.raftServer.GetDocument(req.Id) @@ -698,11 +691,6 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument } func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - start := time.Now() - defer RecordMetrics(start, "search") - - s.logger.Printf("[INFO] search %v", req) - resp := &protobuf.SearchResponse{} // Any -> bleve.SearchRequest @@ -826,15 +814,8 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) } func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexConfigResponse, error) { - start := time.Now() - defer RecordMetrics(start, "indexconfig") - resp := &protobuf.GetIndexConfigResponse{} - s.logger.Printf("[INFO] stats %v", req) - - var err error - indexConfig, err := s.raftServer.GetIndexConfig() if err != nil { return resp, status.Error(codes.Internal, err.Error()) @@ -852,15 +833,8 @@ func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*pr } func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexStatsResponse, error) { - start := time.Now() - defer RecordMetrics(start, "indexstats") - resp := &protobuf.GetIndexStatsResponse{} - s.logger.Printf("[INFO] stats %v", req) - - var err error - indexStats, err := s.raftServer.GetIndexStats() if err != nil { return resp, status.Error(codes.Internal, err.Error()) diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 280b895..12dbda1 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -19,7 +19,6 @@ import ( "io/ioutil" "log" "net/http" - "time" "github.com/blevesearch/bleve" "github.com/gorilla/mux" @@ -61,12 +60,12 @@ func NewRootHandler(logger *log.Logger) *RootHandler { } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() status := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, status, h.logger) - blasthttp.RecordMetrics(start, status, w, r, h.logger) + //blasthttp.RecordMetrics(start, status, w, r, h.logger) }() msgMap := map[string]interface{}{ @@ -93,12 +92,12 @@ func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler } func (h *GetHandler) 
ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() vars := mux.Vars(r) @@ -159,12 +158,12 @@ func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandle } func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() // create documents @@ -289,12 +288,12 @@ func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) *DeleteHa } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() // create documents @@ -396,12 +395,12 @@ func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { } func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() searchRequestBytes, err := ioutil.ReadAll(r.Body) diff --git a/indexer/index.go b/indexer/index.go index 247bb6b..db71e78 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -209,6 +209,10 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { dr, err := r.DocIDReaderAll() for { + if dr == nil { + i.logger.Printf("[ERR] %v", err) + break + } id, err := dr.Next() if id == nil { i.logger.Print("[DEBUG] finished to read all document ids") diff --git a/indexer/metric.go b/indexer/metric.go deleted file mode 100644 index a85d552..0000000 --- a/indexer/metric.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2018 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "blast" - subsystem = "indexer" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "duration_seconds", - Help: "The index operation durations in seconds.", - }, - []string{ - "func", - }, - ) - OperationsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "operations_total", - Help: "The number of index operations.", - }, - []string{ - "func", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(OperationsTotal) -} - -func RecordMetrics(start time.Time, funcName string) { - DurationSeconds.With(prometheus.Labels{"func": funcName}).Observe(float64(time.Since(start)) / float64(time.Second)) - OperationsTotal.With(prometheus.Labels{"func": funcName}).Inc() - - return -} diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 3e21af0..23d761e 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -434,11 +434,9 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch } func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - start := time.Now() s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() - RecordMetrics(start, "snapshot") }() resp := &empty.Empty{} @@ -452,11 +450,9 @@ func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Em } func (s *GRPCService) GetState(ctx context.Context, req *protobuf.GetStateRequest) (*protobuf.GetStateResponse, error) { - start := time.Now() s.stateMutex.RLock() defer func() { s.stateMutex.RUnlock() - RecordMetrics(start, "get") }() resp := &protobuf.GetStateResponse{} @@ -483,11 +479,9 @@ func (s *GRPCService) GetState(ctx context.Context, req *protobuf.GetStateReques } func (s *GRPCService) SetState(ctx context.Context, req *protobuf.SetStateRequest) (*empty.Empty, error) { - start := time.Now() s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() - RecordMetrics(start, "set") }() resp := &empty.Empty{} @@ -532,11 +526,9 @@ func (s *GRPCService) SetState(ctx context.Context, req *protobuf.SetStateReques } func (s *GRPCService) DeleteState(ctx context.Context, req *protobuf.DeleteStateRequest) (*empty.Empty, error) { - start := time.Now() s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() - RecordMetrics(start, "delete") }() s.logger.Printf("[INFO] set %v", req) diff --git a/manager/http_router.go b/manager/http_router.go index ff41e7f..04c0caf 100644 --- a/manager/http_router.go +++ b/manager/http_router.go @@ -19,7 +19,6 @@ import ( "io/ioutil" "log" "net/http" - "time" "github.com/gorilla/mux" blasterrors "github.com/mosuka/blast/errors" @@ -60,12 +59,12 @@ func NewRootHandler(logger *log.Logger) *RootHandler { } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() status := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, status, h.logger) - blasthttp.RecordMetrics(start, status, w, r, h.logger) + //blasthttp.RecordMetrics(start, status, w, r, h.logger) }() msgMap := map[string]interface{}{ @@ -92,12 +91,12 @@ func NewGetHandler(client *grpc.Client, logger *log.Logger) *GetHandler { } func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus 
:= http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() vars := mux.Vars(r) @@ -158,12 +157,12 @@ func NewPutHandler(client *grpc.Client, logger *log.Logger) *PutHandler { } func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() vars := mux.Vars(r) @@ -237,12 +236,12 @@ func NewDeleteHandler(client *grpc.Client, logger *log.Logger) *DeleteHandler { } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() + //start := time.Now() httpStatus := http.StatusOK content := make([]byte, 0) defer func() { blasthttp.WriteResponse(w, content, httpStatus, h.logger) - blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) + //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) }() vars := mux.Vars(r) diff --git a/manager/metric.go b/manager/metric.go deleted file mode 100644 index 17a326e..0000000 --- a/manager/metric.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2018 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "blast" - subsystem = "manager" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "duration_seconds", - Help: "The index operation durations in seconds.", - }, - []string{ - "func", - }, - ) - OperationsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "operations_total", - Help: "The number of index operations.", - }, - []string{ - "func", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(OperationsTotal) -} - -func RecordMetrics(start time.Time, funcName string) { - DurationSeconds.With(prometheus.Labels{"func": funcName}).Observe(float64(time.Since(start)) / float64(time.Second)) - OperationsTotal.With(prometheus.Labels{"func": funcName}).Inc() - - return -} diff --git a/protobuf/blast.pb.go b/protobuf/blast.pb.go index 5003cc0..55fe885 100644 --- a/protobuf/blast.pb.go +++ b/protobuf/blast.pb.go @@ -110,7 +110,7 @@ func (WatchStateResponse_Command) EnumDescriptor() ([]byte, []int) { // use for health check type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.LivenessProbeResponse_State" json:"state,omitempty"` + State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.LivenessProbeResponse_State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -150,7 +150,7 @@ func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { // use for health check type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.ReadinessProbeResponse_State" json:"state,omitempty"` + State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.ReadinessProbeResponse_State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -608,7 +608,7 @@ func (m *WatchStateRequest) GetKey() string { } type WatchStateResponse struct { - Command WatchStateResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=index.WatchStateResponse_Command" json:"command,omitempty"` + Command WatchStateResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=protobuf.WatchStateResponse_Command" json:"command,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1109,98 +1109,98 @@ func (m *Document) GetFields() *any.Any { } func init() { - proto.RegisterEnum("index.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("index.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterEnum("index.WatchStateResponse_Command", WatchStateResponse_Command_name, WatchStateResponse_Command_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "index.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "index.ReadinessProbeResponse") - proto.RegisterType((*GetNodeRequest)(nil), "index.GetNodeRequest") - 
proto.RegisterType((*GetNodeResponse)(nil), "index.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "index.SetNodeRequest") - proto.RegisterType((*DeleteNodeRequest)(nil), "index.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "index.GetClusterResponse") - proto.RegisterType((*GetStateRequest)(nil), "index.GetStateRequest") - proto.RegisterType((*GetStateResponse)(nil), "index.GetStateResponse") - proto.RegisterType((*SetStateRequest)(nil), "index.SetStateRequest") - proto.RegisterType((*DeleteStateRequest)(nil), "index.DeleteStateRequest") - proto.RegisterType((*WatchStateRequest)(nil), "index.WatchStateRequest") - proto.RegisterType((*WatchStateResponse)(nil), "index.WatchStateResponse") - proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") - proto.RegisterType((*IndexDocumentResponse)(nil), "index.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "index.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "index.DeleteDocumentResponse") - proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") - proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") - proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") - proto.RegisterType((*Document)(nil), "index.Document") + proto.RegisterEnum("protobuf.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) + proto.RegisterEnum("protobuf.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) + proto.RegisterEnum("protobuf.WatchStateResponse_Command", WatchStateResponse_Command_name, WatchStateResponse_Command_value) + proto.RegisterType((*LivenessProbeResponse)(nil), "protobuf.LivenessProbeResponse") + proto.RegisterType((*ReadinessProbeResponse)(nil), "protobuf.ReadinessProbeResponse") + proto.RegisterType((*GetNodeRequest)(nil), "protobuf.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "protobuf.GetNodeResponse") + proto.RegisterType((*SetNodeRequest)(nil), "protobuf.SetNodeRequest") + proto.RegisterType((*DeleteNodeRequest)(nil), "protobuf.DeleteNodeRequest") + proto.RegisterType((*GetClusterResponse)(nil), "protobuf.GetClusterResponse") + proto.RegisterType((*GetStateRequest)(nil), "protobuf.GetStateRequest") + proto.RegisterType((*GetStateResponse)(nil), "protobuf.GetStateResponse") + proto.RegisterType((*SetStateRequest)(nil), "protobuf.SetStateRequest") + proto.RegisterType((*DeleteStateRequest)(nil), "protobuf.DeleteStateRequest") + proto.RegisterType((*WatchStateRequest)(nil), "protobuf.WatchStateRequest") + proto.RegisterType((*WatchStateResponse)(nil), "protobuf.WatchStateResponse") + proto.RegisterType((*GetDocumentRequest)(nil), "protobuf.GetDocumentRequest") + proto.RegisterType((*GetDocumentResponse)(nil), "protobuf.GetDocumentResponse") + proto.RegisterType((*IndexDocumentRequest)(nil), "protobuf.IndexDocumentRequest") + proto.RegisterType((*IndexDocumentResponse)(nil), "protobuf.IndexDocumentResponse") + proto.RegisterType((*DeleteDocumentRequest)(nil), "protobuf.DeleteDocumentRequest") + proto.RegisterType((*DeleteDocumentResponse)(nil), "protobuf.DeleteDocumentResponse") + proto.RegisterType((*SearchRequest)(nil), 
"protobuf.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "protobuf.SearchResponse") + proto.RegisterType((*GetIndexConfigResponse)(nil), "protobuf.GetIndexConfigResponse") + proto.RegisterType((*GetIndexStatsResponse)(nil), "protobuf.GetIndexStatsResponse") + proto.RegisterType((*Document)(nil), "protobuf.Document") } func init() { proto.RegisterFile("protobuf/blast.proto", fileDescriptor_406ca165ef12c7d5) } var fileDescriptor_406ca165ef12c7d5 = []byte{ - // 936 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xed, 0x6e, 0xdb, 0x36, - 0x14, 0xb5, 0x94, 0x39, 0x76, 0xae, 0x63, 0xd7, 0xe5, 0x6c, 0xaf, 0x51, 0x5b, 0x20, 0x63, 0xf6, - 0xd1, 0x7d, 0x29, 0x45, 0x86, 0xa1, 0xdb, 0xba, 0x0d, 0x73, 0x62, 0x2d, 0x0b, 0x1a, 0x38, 0xab, - 0x94, 0xad, 0xe8, 0xfe, 0x04, 0xb2, 0xc4, 0x24, 0x42, 0x6d, 0xc9, 0xb3, 0xa8, 0x62, 0x01, 0xf6, - 0x6f, 0xaf, 0xb5, 0x57, 0xd9, 0xbb, 0x14, 0x22, 0x29, 0x5a, 0x92, 0x65, 0x29, 0x40, 0xfe, 0x59, - 0xd4, 0xb9, 0xe7, 0xf0, 0x5e, 0x52, 0xe7, 0x24, 0xd0, 0x9b, 0x2f, 0x02, 0x1a, 0x4c, 0xa2, 0xcb, - 0xfd, 0xc9, 0xd4, 0x0e, 0xa9, 0xce, 0x1e, 0x51, 0xdd, 0xf3, 0x5d, 0xf2, 0xb7, 0xb6, 0x73, 0x15, - 0x04, 0x57, 0x53, 0xb2, 0x2f, 0x31, 0xb6, 0x7f, 0xc3, 0x11, 0xda, 0xc3, 0xfc, 0x2b, 0x32, 0x9b, - 0x53, 0xf1, 0x12, 0xff, 0x03, 0xfd, 0x53, 0xef, 0x2d, 0xf1, 0x49, 0x18, 0xfe, 0xb6, 0x08, 0x26, - 0xc4, 0x24, 0xe1, 0x3c, 0xf0, 0x43, 0x82, 0xbe, 0x85, 0x7a, 0x48, 0x6d, 0x4a, 0x1e, 0x28, 0xbb, - 0xca, 0x93, 0xce, 0x01, 0xd6, 0x99, 0x8e, 0x5e, 0x08, 0xd6, 0xad, 0x18, 0x69, 0xf2, 0x02, 0xfc, - 0x19, 0xd4, 0xd9, 0x33, 0x6a, 0x41, 0xe3, 0xf7, 0xf1, 0x8b, 0xf1, 0xd9, 0xab, 0x71, 0xb7, 0x86, - 0xb6, 0xa0, 0x3e, 0x3c, 0x3d, 0xf9, 0xc3, 0xe8, 0x2a, 0xa8, 0x09, 0xef, 0x8d, 0x8c, 0xe1, 0xa8, - 0xab, 0xe2, 0x7f, 0x15, 0x18, 0x98, 0xc4, 0x76, 0xbd, 0x55, 0xfd, 0xef, 0xb2, 0xfa, 0x7b, 0x42, - 0xbf, 0x18, 0x9d, 0xdd, 0x80, 0xbe, 0x6e, 0x03, 0xa6, 0x31, 0x1c, 0xbd, 0xee, 0x2a, 0xa8, 0x0d, - 0x5b, 0xe3, 0xb3, 0xf3, 0x0b, 0xfe, 0xa8, 0xe2, 0x5d, 0xe8, 0x1c, 0x13, 0x3a, 0x0e, 0x5c, 0x62, - 0x92, 0xbf, 0x22, 0x12, 0x52, 0xd4, 0x01, 0xd5, 0x73, 0x99, 0xf2, 0x96, 0xa9, 0x7a, 0x2e, 0x7e, - 0x0d, 0xf7, 0x24, 0x42, 0xec, 0xef, 0x29, 0x34, 0x67, 0x84, 0xda, 0xae, 0x4d, 0x6d, 0x06, 0x6c, - 0x1d, 0xf4, 0x74, 0x3e, 0x68, 0x3d, 0x19, 0xb4, 0x3e, 0xf4, 0x6f, 0x4c, 0x89, 0x42, 0xbd, 0xa4, - 0x23, 0x95, 0xf1, 0x8a, 0xcd, 0x9a, 0xd0, 0xb1, 0x4a, 0xc5, 0x33, 0x4a, 0xea, 0x6d, 0x94, 0xf0, - 0x1e, 0xdc, 0x1f, 0x91, 0x29, 0xa1, 0xa4, 0xac, 0xa7, 0x11, 0xa0, 0x63, 0x42, 0x8f, 0xa6, 0x51, - 0x48, 0xc9, 0x42, 0xb6, 0xa5, 0x43, 0xc3, 0xe1, 0x4b, 0xa5, 0x5d, 0x25, 0x20, 0xbc, 0xc7, 0x26, - 0xc3, 0xc7, 0x2f, 0x84, 0xba, 0xb0, 0xf1, 0x86, 0xdc, 0x08, 0xa5, 0xf8, 0x27, 0xfe, 0x09, 0xba, - 0x4b, 0x90, 0x10, 0xfa, 0x1c, 0xea, 0x6f, 0xed, 0x69, 0x44, 0x4a, 0x65, 0x38, 0x04, 0x9f, 0xc1, - 0x3d, 0xab, 0x4a, 0x64, 0x49, 0xa8, 0x56, 0x13, 0x7e, 0x02, 0x88, 0x0f, 0xa8, 0x62, 0xe3, 0x1f, - 0xc3, 0xfd, 0x57, 0x36, 0x75, 0xae, 0x2b, 0x60, 0xff, 0x29, 0x80, 0xd2, 0x38, 0xd1, 0xe2, 0x73, - 0x68, 0x38, 0xc1, 0x6c, 0x66, 0xfb, 0xae, 0xb8, 0xc4, 0x1f, 0x8a, 0x4b, 0xbc, 0x8a, 0xd5, 0x8f, - 0x38, 0xd0, 0x4c, 0x2a, 0x12, 0x15, 0xb5, 0xa0, 0xc1, 0x8d, 0xea, 0x06, 0xbf, 0x80, 0x86, 0x60, - 0xcc, 0x7e, 0x04, 0x0d, 0xd8, 0xb0, 0x8c, 0xf3, 0xae, 0x82, 0x00, 0x36, 0x47, 0xc6, 0xa9, 0x71, - 0x6e, 0x74, 0x55, 0xfc, 0x11, 0xbb, 0x09, 0xa3, 0xc0, 0x89, 0x66, 0xc4, 0xa7, 0xeb, 0xee, 0xcb, - 0x11, 0xbc, 0x9f, 0x41, 0x89, 0x26, 0xbf, 0x84, 0xcd, 0x4b, 0x8f, 0x4c, 0xdd, 0xb0, 0xf4, 
0x20, - 0x05, 0x06, 0x9f, 0x43, 0xef, 0x24, 0x1e, 0x41, 0x85, 0x58, 0x8a, 0x55, 0xbd, 0x05, 0xeb, 0x57, - 0xd0, 0xcf, 0xb1, 0x8a, 0xcd, 0xf5, 0xa0, 0xee, 0x04, 0x91, 0x4f, 0x19, 0x73, 0xdd, 0xe4, 0x0f, - 0xf8, 0x53, 0xe8, 0xf3, 0xd3, 0xaf, 0x6a, 0x59, 0x87, 0x41, 0x1e, 0x58, 0x4a, 0x7c, 0x0a, 0x6d, - 0x8b, 0xd8, 0x0b, 0xe7, 0x3a, 0x21, 0x7c, 0x0e, 0x9d, 0x90, 0x2d, 0x5c, 0x2c, 0xf8, 0x4a, 0xe9, - 0x90, 0xda, 0x61, 0xba, 0x18, 0xbf, 0x88, 0x9d, 0x81, 0x2f, 0x48, 0x4f, 0x6c, 0x4b, 0xba, 0x30, - 0x9a, 0x96, 0xb3, 0x6d, 0x27, 0x6c, 0x31, 0x12, 0xbf, 0x84, 0xc1, 0x31, 0xa1, 0x6c, 0x4a, 0x47, - 0x81, 0x7f, 0xe9, 0x5d, 0x49, 0xd2, 0x67, 0xb0, 0xcd, 0x6e, 0xe5, 0x85, 0xc3, 0xd6, 0x4b, 0x39, - 0x5b, 0xde, 0x92, 0x00, 0x8f, 0xa1, 0x9f, 0x50, 0xc6, 0x77, 0x39, 0x94, 0x8c, 0xdf, 0x00, 0xc7, - 0x5d, 0xc4, 0x0e, 0x57, 0x7e, 0x2f, 0xc0, 0x93, 0xe5, 0xf8, 0x57, 0x68, 0x26, 0x73, 0xbe, 0xdb, - 0x7d, 0x38, 0xf8, 0x7f, 0x0b, 0xea, 0x87, 0x71, 0x46, 0xa2, 0x63, 0x68, 0x67, 0x12, 0x0b, 0x0d, - 0x56, 0x0a, 0x8d, 0x38, 0x0d, 0xb5, 0x47, 0x65, 0xf9, 0x86, 0x6b, 0xe8, 0x04, 0x3a, 0xd9, 0xe8, - 0x59, 0xcb, 0xf4, 0xb8, 0x34, 0xa9, 0x70, 0x0d, 0x7d, 0x0f, 0x0d, 0x11, 0x26, 0xa8, 0x2f, 0xb0, - 0xd9, 0xf8, 0xd1, 0x06, 0xf9, 0xe5, 0x74, 0xad, 0x95, 0xab, 0xb5, 0xf2, 0xb5, 0x85, 0xdb, 0xc2, - 0x35, 0xf4, 0x33, 0xc0, 0x32, 0x15, 0xd0, 0x03, 0x51, 0xbe, 0x12, 0x14, 0x25, 0x0c, 0x43, 0x80, - 0x65, 0x64, 0xac, 0x1d, 0xc0, 0xce, 0x72, 0xf7, 0xb9, 0x74, 0xc1, 0x35, 0x64, 0xc0, 0x36, 0x73, - 0xbf, 0xbb, 0x90, 0x3c, 0x55, 0xd0, 0x0f, 0xd0, 0xb4, 0x7c, 0x7b, 0x1e, 0x5e, 0x07, 0x74, 0x2d, - 0xc5, 0xfa, 0x3e, 0x7e, 0x84, 0x66, 0x92, 0x47, 0x28, 0x35, 0xeb, 0xb4, 0xcb, 0x6b, 0x1f, 0xac, - 0xac, 0xcb, 0x1e, 0x62, 0xf1, 0x7c, 0x79, 0x2e, 0x9f, 0x4a, 0xc4, 0x0f, 0xa1, 0x95, 0xca, 0x1e, - 0xb4, 0x93, 0x39, 0x87, 0x5b, 0x72, 0x18, 0x00, 0xcb, 0x0c, 0x91, 0x47, 0xb9, 0x12, 0x55, 0x72, - 0x8a, 0xab, 0x81, 0xc3, 0xa6, 0xf8, 0x0b, 0xb4, 0x52, 0x96, 0x8e, 0x52, 0x33, 0xcf, 0x39, 0xa3, - 0xa6, 0x15, 0xbd, 0x92, 0x03, 0x19, 0x43, 0x3b, 0xe3, 0xbf, 0xe8, 0xa1, 0x80, 0x17, 0x79, 0xbd, - 0xfc, 0xd4, 0x0a, 0x2d, 0x1b, 0xd7, 0x9e, 0x28, 0xe8, 0x25, 0x74, 0xb2, 0xbe, 0x8b, 0x1e, 0x65, - 0xa6, 0x94, 0x67, 0x7c, 0xbc, 0xe6, 0x6d, 0x8a, 0xf2, 0x19, 0x6c, 0x72, 0x33, 0x45, 0x3d, 0x79, - 0x62, 0x29, 0xb3, 0xd5, 0xfa, 0xb9, 0xd5, 0xf4, 0x87, 0x9f, 0x35, 0xce, 0xca, 0x0f, 0xbf, 0xd8, - 0x67, 0x71, 0x2d, 0x36, 0xa3, 0x8c, 0x61, 0x56, 0x9a, 0x51, 0xa1, 0xbd, 0xe2, 0xda, 0x21, 0xfe, - 0x73, 0xf7, 0xca, 0xa3, 0xd7, 0xd1, 0x44, 0x77, 0x82, 0xd9, 0xfe, 0x2c, 0x08, 0xa3, 0x37, 0x36, - 0xff, 0xa7, 0x40, 0xfe, 0x91, 0x3f, 0xd9, 0x64, 0xbf, 0xbe, 0x7e, 0x17, 0x00, 0x00, 0xff, 0xff, - 0xbf, 0x28, 0x7e, 0x79, 0x36, 0x0c, 0x00, 0x00, + // 937 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x5b, 0x6f, 0x1a, 0x47, + 0x18, 0x65, 0xd7, 0xc5, 0xe0, 0x83, 0x21, 0x64, 0x8a, 0x5d, 0x67, 0x93, 0x36, 0x68, 0x72, 0x69, + 0x7a, 0x5b, 0x47, 0xae, 0xaa, 0xaa, 0xca, 0xa5, 0x75, 0x60, 0xe5, 0xa4, 0x46, 0xb8, 0x5d, 0x68, + 0xad, 0xf4, 0xc5, 0x5a, 0x60, 0x6c, 0xaf, 0x02, 0xbb, 0x94, 0x9d, 0x8d, 0xea, 0xa7, 0xbe, 0xf5, + 0x9f, 0xf4, 0x77, 0xf4, 0xaf, 0x55, 0x7b, 0xbf, 0xb0, 0x17, 0xa4, 0xbc, 0x31, 0x33, 0xe7, 0x3b, + 0xdf, 0x65, 0x86, 0x73, 0xb4, 0xe8, 0x2c, 0x57, 0x26, 0x37, 0x27, 0xf6, 0xe5, 0xe1, 0x64, 0xae, + 0x59, 0x5c, 0x76, 0x97, 0xa4, 0x1e, 0xec, 0x4a, 0x77, 0xae, 0x4c, 0xf3, 0x6a, 0xce, 0x0e, 0x43, + 0x98, 0x66, 0xdc, 0x78, 0x20, 0xe9, 0x6e, 0xfa, 0x88, 0x2d, 0x96, 0xdc, 0x3f, 0xa4, 0x7f, 0x63, + 0x6f, 0xa0, 0xbf, 
0x67, 0x06, 0xb3, 0xac, 0x5f, 0x56, 0xe6, 0x84, 0xa9, 0xcc, 0x5a, 0x9a, 0x86, + 0xc5, 0xc8, 0x33, 0x54, 0x2d, 0xae, 0x71, 0x76, 0x20, 0x74, 0x85, 0x27, 0xad, 0xa3, 0x47, 0x72, + 0x10, 0x2e, 0x67, 0xe2, 0xe5, 0x91, 0x03, 0x56, 0xbd, 0x18, 0xfa, 0x05, 0xaa, 0xee, 0x9a, 0x34, + 0x50, 0xfb, 0x6d, 0x78, 0x3a, 0x3c, 0x3b, 0x1f, 0xb6, 0x2b, 0x64, 0x07, 0xd5, 0xe3, 0xc1, 0x9b, + 0xdf, 0x95, 0xb6, 0x40, 0xea, 0xf8, 0xa8, 0xaf, 0x1c, 0xf7, 0xdb, 0x22, 0xfd, 0x47, 0xc0, 0xbe, + 0xca, 0xb4, 0x99, 0xbe, 0x5e, 0xc2, 0xf3, 0x64, 0x09, 0x8f, 0xa3, 0x12, 0xb2, 0x03, 0x92, 0x35, + 0xc8, 0x79, 0x35, 0xa8, 0xca, 0x71, 0xff, 0x6d, 0x5b, 0x20, 0x4d, 0xec, 0x0c, 0xcf, 0xc6, 0x17, + 0xde, 0x52, 0xa4, 0x5d, 0xb4, 0x4e, 0x18, 0x1f, 0x9a, 0x33, 0xa6, 0xb2, 0x3f, 0x6d, 0x66, 0x71, + 0xd2, 0x82, 0xa8, 0xcf, 0xdc, 0xe4, 0x3b, 0xaa, 0xa8, 0xcf, 0xe8, 0x5b, 0xdc, 0x0a, 0x11, 0x7e, + 0x89, 0x4f, 0x51, 0x5f, 0x30, 0xae, 0xcd, 0x34, 0xae, 0xb9, 0xc0, 0xc6, 0x51, 0x47, 0xf6, 0xc6, + 0x1d, 0x15, 0x7b, 0x6c, 0xdc, 0xa8, 0x21, 0x8a, 0x74, 0x82, 0xa6, 0x44, 0x97, 0xd7, 0x2f, 0x56, + 0x45, 0x6b, 0x54, 0x98, 0x3c, 0x91, 0x49, 0xdc, 0x24, 0x13, 0x7d, 0x80, 0xdb, 0x7d, 0x36, 0x67, + 0x9c, 0x15, 0xf5, 0xd4, 0x07, 0x39, 0x61, 0xbc, 0x37, 0xb7, 0x2d, 0xce, 0x56, 0x61, 0x5b, 0x32, + 0x6a, 0x53, 0x6f, 0xab, 0xb0, 0xab, 0x00, 0x44, 0x1f, 0xb8, 0x93, 0xf1, 0xc6, 0xef, 0x27, 0x6a, + 0x63, 0xeb, 0x1d, 0xbb, 0xf1, 0x33, 0x39, 0x3f, 0xe9, 0x4b, 0xb4, 0x23, 0x90, 0x9f, 0xe8, 0x4b, + 0x54, 0xdf, 0x6b, 0x73, 0x9b, 0x15, 0xa6, 0xf1, 0x20, 0xf4, 0x0c, 0xb7, 0x46, 0x65, 0x49, 0x22, + 0x42, 0xb1, 0x9c, 0xf0, 0x31, 0x88, 0x37, 0xa0, 0x92, 0xc2, 0x1f, 0xe1, 0xf6, 0xb9, 0xc6, 0xa7, + 0xd7, 0x25, 0xb0, 0xff, 0x04, 0x90, 0x38, 0xce, 0x6f, 0xf1, 0x25, 0x6a, 0x53, 0x73, 0xb1, 0xd0, + 0x8c, 0x99, 0xff, 0x8e, 0x1f, 0x46, 0xc5, 0xac, 0xc3, 0xe5, 0x9e, 0x87, 0x55, 0x83, 0xa0, 0x20, + 0x91, 0x98, 0xd1, 0xe3, 0x56, 0x79, 0x8f, 0x5f, 0xa1, 0xe6, 0x33, 0x26, 0xff, 0x07, 0x35, 0x6c, + 0x8d, 0x94, 0x71, 0x5b, 0x20, 0xc0, 0x76, 0x5f, 0x19, 0x28, 0x63, 0xa5, 0x2d, 0xd2, 0x87, 0xee, + 0x63, 0xe8, 0x9b, 0x53, 0x7b, 0xc1, 0x0c, 0x9e, 0xf7, 0x64, 0x7a, 0xf8, 0x38, 0x81, 0xf2, 0xfb, + 0xfc, 0x1a, 0xdb, 0x97, 0x3a, 0x9b, 0xcf, 0xac, 0xc2, 0xbb, 0xf4, 0x31, 0x74, 0x8c, 0xce, 0x1b, + 0x63, 0xc6, 0xfe, 0x2a, 0x49, 0x16, 0x63, 0x15, 0x37, 0x60, 0xfd, 0x06, 0x7b, 0x29, 0x56, 0xbf, + 0xb8, 0x0e, 0xaa, 0x53, 0xd3, 0x36, 0xb8, 0xcb, 0x5c, 0x55, 0xbd, 0x05, 0xfd, 0x1c, 0x7b, 0xde, + 0x03, 0x28, 0x6b, 0x59, 0xc6, 0x7e, 0x1a, 0x58, 0x48, 0x3c, 0x40, 0x73, 0xc4, 0xb4, 0xd5, 0xf4, + 0x3a, 0x20, 0x7c, 0x86, 0x96, 0xe5, 0x6e, 0x5c, 0xac, 0xbc, 0x9d, 0xc2, 0x21, 0x35, 0xad, 0x78, + 0x30, 0x3d, 0x75, 0xc4, 0xc1, 0xdb, 0xf0, 0xb3, 0xfe, 0x80, 0x66, 0x48, 0x67, 0xd9, 0xf3, 0x62, + 0xb6, 0xdd, 0x80, 0xcd, 0x41, 0xd2, 0x5f, 0xb1, 0x7f, 0xc2, 0xb8, 0x3b, 0xa5, 0x9e, 0x69, 0x5c, + 0xea, 0x57, 0x21, 0xe9, 0xf7, 0xd8, 0xd5, 0x9d, 0xed, 0x8b, 0xa9, 0xbb, 0x5f, 0xc8, 0xd9, 0xd0, + 0x23, 0x02, 0x3a, 0xc4, 0x5e, 0x40, 0xe9, 0xbc, 0x65, 0x2b, 0x64, 0xfc, 0x0e, 0x1e, 0xee, 0xc2, + 0x11, 0xb9, 0xe2, 0x77, 0x01, 0x3d, 0x0c, 0xa7, 0xaf, 0x51, 0x0f, 0xe6, 0xfc, 0x61, 0xef, 0xe1, + 0xe8, 0x5f, 0xa0, 0xfa, 0xca, 0xf1, 0x4b, 0xf2, 0x33, 0x9a, 0x09, 0xdf, 0x22, 0xfb, 0x6b, 0x81, + 0x8a, 0x63, 0x8b, 0xd2, 0xfd, 0x12, 0xa3, 0xa3, 0x15, 0x32, 0x40, 0x2b, 0x69, 0x40, 0xb9, 0x64, + 0xdd, 0x32, 0xcb, 0xa2, 0x15, 0xf2, 0x13, 0x6a, 0xbe, 0xab, 0x90, 0x83, 0x08, 0x9e, 0xb4, 0x22, + 0xe9, 0x4e, 0xc6, 0x49, 0xc8, 0xf0, 0x02, 0xb5, 0xd1, 0x3a, 0x43, 0xd2, 0x4f, 0xa4, 0x9c, 0x12, + 0x69, 0x85, 0xf4, 0x80, 0xc8, 0x27, 0xc8, 
0xdd, 0x08, 0xb0, 0xe6, 0x1e, 0x05, 0x24, 0x7d, 0x20, + 0xf2, 0x91, 0xdc, 0x79, 0xdc, 0x4b, 0xb4, 0x91, 0x72, 0x1d, 0x5a, 0x21, 0xaf, 0xb1, 0xeb, 0x4a, + 0xe2, 0x07, 0xf2, 0x3c, 0x15, 0xc8, 0x73, 0xd4, 0x47, 0x86, 0xb6, 0xb4, 0xae, 0x4d, 0x9e, 0xcb, + 0x52, 0x34, 0x92, 0x7a, 0x60, 0x55, 0x24, 0x39, 0xfa, 0xb8, 0x07, 0x48, 0x52, 0xd6, 0x51, 0xd8, + 0xcc, 0x8f, 0xa8, 0x8f, 0x32, 0x48, 0x52, 0x1e, 0x56, 0x50, 0x85, 0x82, 0x46, 0xcc, 0x9f, 0xc8, + 0xbd, 0xf4, 0xcd, 0x6c, 0x48, 0x73, 0x0a, 0x44, 0x3e, 0x13, 0xbf, 0xdf, 0x35, 0x53, 0x8b, 0xcf, + 0x75, 0xdd, 0x9a, 0xdc, 0xb9, 0x0e, 0xd0, 0x88, 0x89, 0x3f, 0x49, 0x5e, 0x44, 0x4a, 0x46, 0xa5, + 0x4f, 0x73, 0x4e, 0xc3, 0x11, 0x8d, 0xd1, 0x4c, 0xe8, 0x35, 0xf9, 0x2c, 0x8a, 0xc8, 0xb2, 0x87, + 0xf8, 0xbf, 0x33, 0x53, 0xe8, 0x69, 0xe5, 0x89, 0x40, 0xce, 0xd1, 0x4a, 0xaa, 0x35, 0xb9, 0x9f, + 0x1e, 0x5d, 0x9a, 0xb7, 0x9b, 0x0f, 0x88, 0x11, 0xbf, 0xc0, 0xb6, 0x27, 0xc4, 0xe4, 0x93, 0xf8, + 0x7d, 0xc6, 0xb4, 0x5a, 0x3a, 0x58, 0x3f, 0x88, 0xeb, 0x46, 0x52, 0x7a, 0x37, 0xd1, 0x8d, 0x6c, + 0xb1, 0xa6, 0x15, 0x47, 0xd1, 0x12, 0xaa, 0xbb, 0x89, 0xa2, 0x65, 0xca, 0x34, 0xad, 0xbc, 0xa2, + 0x7f, 0x74, 0xaf, 0x74, 0x7e, 0x6d, 0x4f, 0xe4, 0xa9, 0xb9, 0x38, 0x5c, 0x98, 0x96, 0xfd, 0x4e, + 0xf3, 0x3e, 0x34, 0xc2, 0xaf, 0x86, 0xc9, 0xb6, 0xfb, 0xeb, 0xdb, 0xff, 0x03, 0x00, 0x00, 0xff, + 0xff, 0x9c, 0x35, 0x0c, 0xa9, 0x8a, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1245,7 +1245,7 @@ func NewBlastClient(cc *grpc.ClientConn) BlastClient { func (c *blastClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Blast/LivenessProbe", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/LivenessProbe", in, out, opts...) if err != nil { return nil, err } @@ -1254,7 +1254,7 @@ func (c *blastClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts . func (c *blastClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Blast/ReadinessProbe", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/ReadinessProbe", in, out, opts...) if err != nil { return nil, err } @@ -1263,7 +1263,7 @@ func (c *blastClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts func (c *blastClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetNode", in, out, opts...) if err != nil { return nil, err } @@ -1272,7 +1272,7 @@ func (c *blastClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...g func (c *blastClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Blast/SetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/SetNode", in, out, opts...) if err != nil { return nil, err } @@ -1281,7 +1281,7 @@ func (c *blastClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...g func (c *blastClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Blast/DeleteNode", in, out, opts...) 
+ err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteNode", in, out, opts...) if err != nil { return nil, err } @@ -1290,7 +1290,7 @@ func (c *blastClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opt func (c *blastClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetCluster", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetCluster", in, out, opts...) if err != nil { return nil, err } @@ -1298,7 +1298,7 @@ func (c *blastClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...g } func (c *blastClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Blast_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[0], "/index.Blast/WatchCluster", opts...) + stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[0], "/protobuf.Blast/WatchCluster", opts...) if err != nil { return nil, err } @@ -1331,7 +1331,7 @@ func (x *blastWatchClusterClient) Recv() (*GetClusterResponse, error) { func (c *blastClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Blast/Snapshot", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/Snapshot", in, out, opts...) if err != nil { return nil, err } @@ -1340,7 +1340,7 @@ func (c *blastClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grp func (c *blastClient) GetState(ctx context.Context, in *GetStateRequest, opts ...grpc.CallOption) (*GetStateResponse, error) { out := new(GetStateResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetState", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetState", in, out, opts...) if err != nil { return nil, err } @@ -1349,7 +1349,7 @@ func (c *blastClient) GetState(ctx context.Context, in *GetStateRequest, opts .. func (c *blastClient) SetState(ctx context.Context, in *SetStateRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Blast/SetState", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/SetState", in, out, opts...) if err != nil { return nil, err } @@ -1358,7 +1358,7 @@ func (c *blastClient) SetState(ctx context.Context, in *SetStateRequest, opts .. func (c *blastClient) DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Blast/DeleteState", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteState", in, out, opts...) if err != nil { return nil, err } @@ -1366,7 +1366,7 @@ func (c *blastClient) DeleteState(ctx context.Context, in *DeleteStateRequest, o } func (c *blastClient) WatchState(ctx context.Context, in *WatchStateRequest, opts ...grpc.CallOption) (Blast_WatchStateClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[1], "/index.Blast/WatchState", opts...) + stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[1], "/protobuf.Blast/WatchState", opts...) 
if err != nil { return nil, err } @@ -1399,7 +1399,7 @@ func (x *blastWatchStateClient) Recv() (*WatchStateResponse, error) { func (c *blastClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetDocument", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetDocument", in, out, opts...) if err != nil { return nil, err } @@ -1407,7 +1407,7 @@ func (c *blastClient) GetDocument(ctx context.Context, in *GetDocumentRequest, o } func (c *blastClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[2], "/index.Blast/IndexDocument", opts...) + stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[2], "/protobuf.Blast/IndexDocument", opts...) if err != nil { return nil, err } @@ -1441,7 +1441,7 @@ func (x *blastIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error } func (c *blastClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[3], "/index.Blast/DeleteDocument", opts...) + stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[3], "/protobuf.Blast/DeleteDocument", opts...) if err != nil { return nil, err } @@ -1476,7 +1476,7 @@ func (x *blastDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, err func (c *blastClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/index.Blast/Search", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/Search", in, out, opts...) if err != nil { return nil, err } @@ -1485,7 +1485,7 @@ func (c *blastClient) Search(ctx context.Context, in *SearchRequest, opts ...grp func (c *blastClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { out := new(GetIndexConfigResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetIndexConfig", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexConfig", in, out, opts...) if err != nil { return nil, err } @@ -1494,7 +1494,7 @@ func (c *blastClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts func (c *blastClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { out := new(GetIndexStatsResponse) - err := c.cc.Invoke(ctx, "/index.Blast/GetIndexStats", in, out, opts...) + err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexStats", in, out, opts...) 
if err != nil { return nil, err } @@ -1537,7 +1537,7 @@ func _Blast_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/LivenessProbe", + FullMethod: "/protobuf.Blast/LivenessProbe", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).LivenessProbe(ctx, req.(*empty.Empty)) @@ -1555,7 +1555,7 @@ func _Blast_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/ReadinessProbe", + FullMethod: "/protobuf.Blast/ReadinessProbe", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).ReadinessProbe(ctx, req.(*empty.Empty)) @@ -1573,7 +1573,7 @@ func _Blast_GetNode_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetNode", + FullMethod: "/protobuf.Blast/GetNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetNode(ctx, req.(*GetNodeRequest)) @@ -1591,7 +1591,7 @@ func _Blast_SetNode_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/SetNode", + FullMethod: "/protobuf.Blast/SetNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).SetNode(ctx, req.(*SetNodeRequest)) @@ -1609,7 +1609,7 @@ func _Blast_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/DeleteNode", + FullMethod: "/protobuf.Blast/DeleteNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) @@ -1627,7 +1627,7 @@ func _Blast_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetCluster", + FullMethod: "/protobuf.Blast/GetCluster", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetCluster(ctx, req.(*empty.Empty)) @@ -1666,7 +1666,7 @@ func _Blast_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/Snapshot", + FullMethod: "/protobuf.Blast/Snapshot", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).Snapshot(ctx, req.(*empty.Empty)) @@ -1684,7 +1684,7 @@ func _Blast_GetState_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetState", + FullMethod: "/protobuf.Blast/GetState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetState(ctx, req.(*GetStateRequest)) @@ -1702,7 +1702,7 @@ func _Blast_SetState_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/SetState", + FullMethod: "/protobuf.Blast/SetState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).SetState(ctx, req.(*SetStateRequest)) @@ -1720,7 +1720,7 @@ func _Blast_DeleteState_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, 
- FullMethod: "/index.Blast/DeleteState", + FullMethod: "/protobuf.Blast/DeleteState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).DeleteState(ctx, req.(*DeleteStateRequest)) @@ -1759,7 +1759,7 @@ func _Blast_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetDocument", + FullMethod: "/protobuf.Blast/GetDocument", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetDocument(ctx, req.(*GetDocumentRequest)) @@ -1829,7 +1829,7 @@ func _Blast_Search_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/Search", + FullMethod: "/protobuf.Blast/Search", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).Search(ctx, req.(*SearchRequest)) @@ -1847,7 +1847,7 @@ func _Blast_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetIndexConfig", + FullMethod: "/protobuf.Blast/GetIndexConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetIndexConfig(ctx, req.(*empty.Empty)) @@ -1865,7 +1865,7 @@ func _Blast_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Blast/GetIndexStats", + FullMethod: "/protobuf.Blast/GetIndexStats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BlastServer).GetIndexStats(ctx, req.(*empty.Empty)) @@ -1874,7 +1874,7 @@ func _Blast_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func } var _Blast_serviceDesc = grpc.ServiceDesc{ - ServiceName: "index.Blast", + ServiceName: "protobuf.Blast", HandlerType: (*BlastServer)(nil), Methods: []grpc.MethodDesc{ { diff --git a/protobuf/blast.proto b/protobuf/blast.proto index 0998898..4bb1c86 100644 --- a/protobuf/blast.proto +++ b/protobuf/blast.proto @@ -17,7 +17,7 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; -package index; +package protobuf; option go_package = "github.com/mosuka/blast/protobuf"; From 2f5ddc0ef8b3e00a56cad35ae2e7804ddf70e9c9 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Sun, 30 Jun 2019 21:41:31 +0900 Subject: [PATCH 2/9] change to logging using zap --- cmd/blastd/dispatcher.go | 2 +- cmd/blastd/indexer.go | 2 +- cmd/blastd/manager.go | 2 +- dispatcher/grpc_service.go | 474 +++++++++++++++++++++------------ dispatcher/http_handler.go | 190 +++++++------- dispatcher/server.go | 45 ++-- go.mod | 3 +- grpc/server.go | 16 +- http/metric.go | 181 ++++++------- http/response.go | 7 +- http/router.go | 13 +- http/server.go | 6 +- indexer/grpc_service.go | 520 +++++++++++++++++++++++-------------- indexer/http_handler.go | 191 +++++++------- indexer/index.go | 121 +++++---- indexer/raft_fsm.go | 76 ++++-- indexer/raft_server.go | 161 ++++++++---- indexer/server.go | 159 +++++++----- logutils/http_logger.go | 90 +++++++ logutils/logger.go | 80 ++++++ manager/grpc_service.go | 249 +++++++++++------- manager/http_router.go | 113 ++++---- manager/raft_fsm.go | 57 ++-- manager/raft_fsm_test.go | 15 +- manager/raft_server.go | 158 +++++++---- manager/server.go | 62 ++--- 26 files changed, 1842 insertions(+), 1151 deletions(-) create mode 100644 
logutils/http_logger.go create mode 100644 logutils/logger.go diff --git a/cmd/blastd/dispatcher.go b/cmd/blastd/dispatcher.go index c9b64b3..5b272c2 100644 --- a/cmd/blastd/dispatcher.go +++ b/cmd/blastd/dispatcher.go @@ -20,7 +20,7 @@ import ( "syscall" "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/logutils" + "github.com/mosuka/blast/logutils" "github.com/urfave/cli" ) diff --git a/cmd/blastd/indexer.go b/cmd/blastd/indexer.go index c2855f3..6f8836f 100644 --- a/cmd/blastd/indexer.go +++ b/cmd/blastd/indexer.go @@ -23,7 +23,7 @@ import ( "github.com/blevesearch/bleve/mapping" "github.com/mosuka/blast/indexer" - "github.com/mosuka/logutils" + "github.com/mosuka/blast/logutils" "github.com/urfave/cli" ) diff --git a/cmd/blastd/manager.go b/cmd/blastd/manager.go index 893cf76..57c9734 100644 --- a/cmd/blastd/manager.go +++ b/cmd/blastd/manager.go @@ -22,8 +22,8 @@ import ( "syscall" "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" - "github.com/mosuka/logutils" "github.com/urfave/cli" ) diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 229b18c..a98014a 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -19,7 +19,6 @@ import ( "errors" "hash/fnv" "io" - "log" "math/rand" "reflect" "sort" @@ -33,6 +32,7 @@ import ( "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/sortutils" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -42,20 +42,20 @@ type GRPCService struct { managerAddr string - logger *log.Logger + logger *zap.Logger - managers map[string]interface{} - managerClients map[string]*grpc.Client - watchManagersStopCh chan struct{} - watchManagersDoneCh chan struct{} + managers map[string]interface{} + managerClients map[string]*grpc.Client + updateManagersStopCh chan struct{} + updateManagersDoneCh chan struct{} - indexers map[string]interface{} - indexerClients map[string]map[string]*grpc.Client - watchIndexersStopCh chan struct{} - watchIndexersDoneCh chan struct{} + indexers map[string]interface{} + indexerClients map[string]map[string]*grpc.Client + updateIndexersStopCh chan struct{} + updateIndexersDoneCh chan struct{} } -func NewGRPCService(managerAddr string, logger *log.Logger) (*GRPCService, error) { +func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ managerAddr: managerAddr, logger: logger, @@ -69,21 +69,21 @@ func NewGRPCService(managerAddr string, logger *log.Logger) (*GRPCService, error } func (s *GRPCService) Start() error { - s.logger.Print("[INFO] start watching managers") - go s.startWatchManagers(500 * time.Millisecond) + s.logger.Info("start to update manager cluster info") + go s.startUpdateManagers(500 * time.Millisecond) - s.logger.Print("[INFO] start watching indexers") - go s.startWatchIndexers(500 * time.Millisecond) + s.logger.Info("start to update indexer cluster info") + go s.startUpdateIndexers(500 * time.Millisecond) return nil } func (s *GRPCService) Stop() error { - s.logger.Print("[INFO] stop watching managers") - s.stopWatchManagers() + s.logger.Info("stop to update manager cluster info") + s.stopUpdateManagers() - s.logger.Print("[INFO] stop watching indexers") - s.stopWatchIndexers() + s.logger.Info("stop to update indexer cluster info") + s.stopUpdateIndexers() return nil } @@ -92,23 +92,34 @@ func (s *GRPCService) getManagerClient() (*grpc.Client, error) { var client *grpc.Client for id, node := 
range s.managers { - state := node.(map[string]interface{})["state"].(string) - if state != raft.Shutdown.String() { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } - if _, exist := s.managerClients[id]; exist { - client = s.managerClients[id] - break + state, ok := nm["state"].(string) + if !ok { + s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) + continue + } + + if state == raft.Leader.String() || state == raft.Follower.String() { + client, ok = s.managerClients[id] + if ok { + return client, nil } else { - s.logger.Printf("[DEBUG] %v does not exist", id) + s.logger.Error("node does not exist", zap.String("id", id)) } + } else { + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) } } - if client == nil { - return nil, errors.New("client does not exist") - } + err := errors.New("available client does not exist") + s.logger.Error(err.Error()) - return client, nil + return nil, err } func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { @@ -116,32 +127,30 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interfa defer func() { err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } return }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return nil, err } managers, err := client.GetCluster() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return nil, err } return managers, nil } -func (s *GRPCService) startWatchManagers(checkInterval time.Duration) { - s.logger.Printf("[INFO] start watching a cluster") - - s.watchManagersStopCh = make(chan struct{}) - s.watchManagersDoneCh = make(chan struct{}) +func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { + s.updateManagersStopCh = make(chan struct{}) + s.updateManagersDoneCh = make(chan struct{}) defer func() { - close(s.watchManagersDoneCh) + close(s.updateManagersDoneCh) }() var err error @@ -149,145 +158,195 @@ func (s *GRPCService) startWatchManagers(checkInterval time.Duration) { // get initial managers s.managers, err = s.getInitialManagers(s.managerAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } - s.logger.Printf("[DEBUG] %v", s.managers) + s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) // create clients for managers - for id, node := range s.managers { - metadata := node.(map[string]interface{})["metadata"].(map[string]interface{}) + for nodeId, node := range s.managers { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", nodeId)) + continue + } - s.logger.Printf("[DEBUG] create client for %s", metadata["grpc_addr"].(string)) + metadata, ok := nm["metadata"].(map[string]interface{}) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("metadata", metadata)) + continue + } - client, err := grpc.NewClient(metadata["grpc_addr"].(string)) - if err != nil { - s.logger.Printf("[ERR] %v", err) + grpcAddr, ok := metadata["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) continue } - s.managerClients[id] = client + + s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + client, err := grpc.NewClient(grpcAddr) + if 
err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + } + if client != nil { + s.managerClients[nodeId] = client + } } for { select { - case <-s.watchManagersStopCh: - s.logger.Print("[DEBUG] receive request that stop watching managers") + case <-s.updateManagersStopCh: + s.logger.Info("received a request to stop updating a manager cluster") return default: - // get active client for manager client, err := s.getManagerClient() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) continue } // create stream stream, err := client.WatchCluster() if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %v", err) - default: - s.logger.Printf("[ERR] %v", err) - } + s.logger.Error(err.Error()) continue } - // wait for receive cluster updates from stream - s.logger.Print("[DEBUG] wait for receive cluster updates from stream") + s.logger.Info("wait for receive a manager cluster updates from stream") resp, err := stream.Recv() if err == io.EOF { + s.logger.Info(err.Error()) continue } if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %v", err) - default: - s.logger.Printf("[ERR] %v", err) - } + s.logger.Error(err.Error()) continue } // get current manager cluster - cluster, err := protobuf.MarshalAny(resp.Cluster) + managersIntr, err := protobuf.MarshalAny(resp.Cluster) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) continue } - if cluster == nil { - s.logger.Print("[ERR] nil") + if managersIntr == nil { + s.logger.Error(err.Error()) continue } - managers := *cluster.(*map[string]interface{}) + managers := *managersIntr.(*map[string]interface{}) - // compare previous manager with current manager if !reflect.DeepEqual(s.managers, managers) { - s.logger.Printf("[INFO] %v", managers) + // open clients + for id, metadata := range managers { + mm, ok := metadata.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } + + grpcAddr, ok := mm["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + continue + } - // close the client for left manager node - for id := range s.managers { - if _, managerExists := managers[id]; !managerExists { - if _, clientExists := s.managerClients[id]; clientExists { - client := s.managerClients[id] + client, exist := s.managerClients[id] + if exist { + s.logger.Debug("client has already exist in manager list", zap.String("id", id)) + + if client.GetAddress() != grpcAddr { + s.logger.Debug("gRPC address has been changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + + delete(s.managerClients, id) - s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) err = client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id)) } - delete(s.managerClients, id) + newClient, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + + if newClient != nil { + s.managerClients[id] = newClient + } + } else { + s.logger.Debug("gRPC address has not changed", zap.String("id", id), zap.String("client_grpc_addr", 
client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + } + } else { + s.logger.Debug("client does not exist in peer list", zap.String("id", id)) + + s.logger.Debug("create gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + newClient, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + if newClient != nil { + s.managerClients[id] = newClient + } + } + } + + // close nonexistent clients + for id, client := range s.managerClients { + if metadata, exist := managers[id]; !exist { + s.logger.Info("this client is no longer in use", zap.String("id", id), zap.Any("metadata", metadata)) + + s.logger.Debug("close client", zap.String("id", id), zap.String("address", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("address", client.GetAddress())) } + + s.logger.Debug("delete client", zap.String("id", id)) + delete(s.managerClients, id) } } // keep current manager cluster s.managers = managers + s.logger.Debug("managers", zap.Any("managers", s.managers)) } } } } -func (s *GRPCService) stopWatchManagers() { - // close clients - s.logger.Printf("[INFO] close manager clients") - for _, client := range s.managerClients { - s.logger.Printf("[DEBUG] close manager client for %s", client.GetAddress()) +func (s *GRPCService) stopUpdateManagers() { + s.logger.Info("close all manager clients") + for id, client := range s.managerClients { + s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } - // stop watching managers - if s.watchManagersStopCh != nil { - s.logger.Printf("[INFO] stop watching managers") - close(s.watchManagersStopCh) + if s.updateManagersStopCh != nil { + s.logger.Info("send a request to stop updating a manager cluster") + close(s.updateManagersStopCh) } - // wait for stop watching managers has done - s.logger.Printf("[INFO] wait for stop watching managers has done") - <-s.watchManagersDoneCh + s.logger.Info("wait for the manager cluster update to stop") + <-s.updateManagersDoneCh + s.logger.Info("the manager cluster update has been stopped") } -func (s *GRPCService) startWatchIndexers(checkInterval time.Duration) { - s.logger.Printf("[INFO] start watching a cluster") - - s.watchIndexersStopCh = make(chan struct{}) - s.watchIndexersDoneCh = make(chan struct{}) +func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { + s.updateIndexersStopCh = make(chan struct{}) + s.updateIndexersDoneCh = make(chan struct{}) defer func() { - close(s.watchIndexersDoneCh) + close(s.updateIndexersDoneCh) }() // wait for manager available - s.logger.Print("[INFO] wait for manager clients are available") + s.logger.Info("wait for manager clients are available") for { if len(s.managerClients) > 0 { - s.logger.Print("[INFO] manager clients are available") + s.logger.Info("manager clients are available") break } time.Sleep(100 * time.Millisecond) @@ -296,128 +355,205 @@ func (s *GRPCService) startWatchIndexers(checkInterval time.Duration) { // get active client for manager client, err := s.getManagerClient() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } // get initial indexers clusters, err := client.GetState("/cluster_config/clusters/") if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } 
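// NOTE (editor's illustration, not part of the patch): the type assertions in
// the loop below imply that the value stored under "/cluster_config/clusters/"
// is a nested map of cluster id -> "nodes" -> node id -> "metadata" ->
// "grpc_addr". A sketch of that shape, with purely hypothetical ids and
// addresses:
//
//	clusters := map[string]interface{}{
//		"cluster1": map[string]interface{}{
//			"nodes": map[string]interface{}{
//				"indexer1": map[string]interface{}{
//					"metadata": map[string]interface{}{
//						"grpc_addr": ":5010",
//					},
//				},
//			},
//		},
//	}
//
// Each grpc_addr leaf is what the loop dials to populate
// s.indexerClients[clusterId][nodeId].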
if clusters == nil { - s.logger.Print("[ERR] nil") + s.logger.Error("nil") } s.indexers = *clusters.(*map[string]interface{}) // create clients for indexer - for clusterId, ins := range s.indexers { - cluster := ins.(map[string]interface{}) - for nodeId, node := range cluster["nodes"].(map[string]interface{}) { - metadata := node.(map[string]interface{})["metadata"].(map[string]interface{}) + for clusterId, cluster := range s.indexers { + cm, ok := cluster.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) + continue + } - s.logger.Printf("[DEBUG] create indexer client for %s at %s", metadata["grpc_addr"].(string), clusterId) + nodes, ok := cm["nodes"].(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) + continue + } - client, err := grpc.NewClient(metadata["grpc_addr"].(string)) - if err != nil { - s.logger.Printf("[ERR] %v", err) + for nodeId, node := range nodes { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", nodeId)) continue } + metadata, ok := nm["metadata"].(map[string]interface{}) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("metadata", metadata)) + continue + } + + grpcAddr, ok := metadata["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + continue + } + + s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + client, err := grpc.NewClient(metadata["grpc_addr"].(string)) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + } if _, exist := s.indexerClients[clusterId]; !exist { s.indexerClients[clusterId] = make(map[string]*grpc.Client) } - s.indexerClients[clusterId][nodeId] = client } } for { select { - case <-s.watchIndexersStopCh: - s.logger.Print("[DEBUG] receive request that stop watching indexers") + case <-s.updateIndexersStopCh: + s.logger.Info("received a request to stop updating a indexer cluster") return default: - // get active client for manager client, err = s.getManagerClient() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) continue } - // create stream stream, err := client.WatchState("/cluster_config/clusters/") if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %s: %v", client.GetAddress(), err) - default: - s.logger.Printf("[ERR] %s: %v", client.GetAddress(), err) - } + s.logger.Error(err.Error()) continue } - // wait for receive cluster updates from stream - s.logger.Print("[DEBUG] wait for receive cluster updates from stream") + s.logger.Info("wait for receive a indexer cluster updates from stream") resp, err := stream.Recv() if err == io.EOF { continue } if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %v", err) - default: - s.logger.Printf("[ERR] %v", err) - } + s.logger.Error(err.Error()) continue } - log.Printf("[DEBUG] %v", resp) + s.logger.Debug("data has changed", zap.String("key", resp.Key)) - // get current indexer cluster cluster, err := client.GetState("/cluster_config/clusters/") if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) continue } if cluster == nil { - s.logger.Print("[ERR] nil") + 
s.logger.Error("nil") continue } indexers := *cluster.(*map[string]interface{}) // compare previous manager with current manager if !reflect.DeepEqual(s.indexers, indexers) { - s.logger.Printf("[INFO] %v", indexers) + // create clients for indexer + for clusterId, cluster := range s.indexers { + cm, ok := cluster.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) + continue + } + + nodes, ok := cm["nodes"].(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) + continue + } + + for nodeId, node := range nodes { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", nodeId)) + continue + } + + metadata, ok := nm["metadata"].(map[string]interface{}) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("metadata", metadata)) + continue + } + + grpcAddr, ok := metadata["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + continue + } + + client, exist := s.indexerClients[clusterId][nodeId] + if exist { + s.logger.Debug("client has already exist in manager list", zap.String("id", nodeId)) + + if client.GetAddress() != grpcAddr { + s.logger.Debug("gRPC address has been changed", zap.String("id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("recreate gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + + delete(s.indexerClients[clusterId], nodeId) + + err = client.Close() + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId)) + } + + newClient, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + } + + if newClient != nil { + s.indexerClients[clusterId][nodeId] = newClient + } + } + + } else { + s.logger.Debug("client does not exist in peer list", zap.String("id", nodeId)) + + s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + newClient, err := grpc.NewClient(metadata["grpc_addr"].(string)) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + } + if _, exist := s.indexerClients[clusterId]; !exist { + s.indexerClients[clusterId] = make(map[string]*grpc.Client) + } + s.indexerClients[clusterId][nodeId] = newClient + } + } + } } } } } -func (s *GRPCService) stopWatchIndexers() { - // close clients - s.logger.Printf("[INFO] close indexer clients") +func (s *GRPCService) stopUpdateIndexers() { + s.logger.Info("close all indexer clients") for clusterId, cluster := range s.indexerClients { - for _, client := range cluster { - s.logger.Printf("[DEBUG] close indexer client for %s at %s", client.GetAddress(), clusterId) + for id, client := range cluster { + s.logger.Debug("close indexer client", zap.String("cluster_id", clusterId), zap.String("id", id), zap.String("address", client.GetAddress())) err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } } - // stop watching managers - if s.watchIndexersStopCh != nil { - s.logger.Printf("[INFO] stop watching indexers") - close(s.watchIndexersStopCh) + if s.updateIndexersStopCh != nil { + s.logger.Info("send a request to stop updating a index cluster") + close(s.updateIndexersStopCh) } - 
// wait for stop watching indexers has done - s.logger.Printf("[INFO] wait for stop watching indexers has done") - <-s.watchIndexersDoneCh + s.logger.Info("wait for the indexer cluster update to stop") + <-s.updateIndexersDoneCh + s.logger.Info("the indexer cluster update has been stopped") } func (s *GRPCService) getIndexerClients() map[string]*grpc.Client { @@ -483,7 +619,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument fields = r.fields } if r.err != nil { - s.logger.Printf("[ERR] %s %v", r.clusterId, r.err) + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } @@ -492,6 +628,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument fieldsAny := &any.Any{} err := protobuf.UnmarshalAny(fields, fieldsAny) if err != nil { + s.logger.Error(err.Error()) return resp, err } @@ -527,6 +664,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( // create search request ins, err := protobuf.MarshalAny(req.SearchRequest) if err != nil { + s.logger.Error(err.Error()) return resp, err } searchRequest := ins.(*bleve.SearchRequest) @@ -541,7 +679,6 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( for clusterId, client := range indexerClients { wg.Add(1) go func(clusterId string, client *grpc.Client, searchRequest *bleve.SearchRequest, respChan chan respVal) { - s.logger.Printf("[DEBUG] search %s", client.GetAddress()) searchResult, err := client.Search(searchRequest) wg.Done() respChan <- respVal{ @@ -571,7 +708,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( } } if r.err != nil { - s.logger.Printf("[ERR] %s %v", r.clusterId, r.err) + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } @@ -615,6 +752,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( searchResultAny := &any.Any{} err = protobuf.UnmarshalAny(searchResult, searchResultAny) if err != nil { + s.logger.Error(err.Error()) return resp, err } @@ -652,16 +790,19 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e for { req, err := stream.Recv() - if err == io.EOF { - break - } if err != nil { + if err == io.EOF { + s.logger.Debug(err.Error()) + break + } + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } // fields ins, err := protobuf.MarshalAny(req.Fields) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } fields := *ins.(*map[string]interface{}) @@ -692,7 +833,6 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e for clusterId, docs := range docSet { wg.Add(1) go func(clusterId string, docs []map[string]interface{}, respChan chan respVal) { - // index documents count, err := indexerClients[clusterId].IndexDocument(docs) wg.Done() respChan <- respVal{ @@ -714,7 +854,7 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e totalCount += r.count } if r.err != nil { - s.logger.Printf("[ERR] %s %v", r.clusterId, r.err) + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } @@ -740,10 +880,12 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) for { req, err := stream.Recv() - if err == io.EOF { - break - } if err != nil { + if err == io.EOF { + s.logger.Debug(err.Error()) + break + } + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } @@ -782,7 +924,7 @@ func 
(s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) totalCount := len(ids) for r := range respChan { if r.err != nil { - s.logger.Printf("[ERR] %s %v", r.clusterId, r.err) + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index 388d760..8ce7744 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -17,8 +17,8 @@ package dispatcher import ( "encoding/json" "io/ioutil" - "log" "net/http" + "time" "github.com/blevesearch/bleve" "github.com/gorilla/mux" @@ -27,9 +27,10 @@ import ( blasthttp "github.com/mosuka/blast/http" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { +func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { router, err := blasthttp.NewRouter(grpcAddr, logger) if err != nil { return nil, err @@ -50,23 +51,21 @@ func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { } type RootHandler struct { - logger *log.Logger + logger *zap.Logger } -func NewRootHandler(logger *log.Logger) *RootHandler { +func NewRootHandler(logger *zap.Logger) *RootHandler { return &RootHandler{ logger: logger, } } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() + start := time.Now() status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, status, h.logger) - //blasthttp.RecordMetrics(start, status, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) msgMap := map[string]interface{}{ "version": version.Version, @@ -75,16 +74,18 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { content, err := blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + + blasthttp.WriteResponse(w, content, status, h.logger) } type GetHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler { +func NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, @@ -92,13 +93,11 @@ func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler } func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) vars := mux.Vars(r) @@ -106,49 +105,53 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { switch err { case errors.ErrNotFound: - httpStatus = http.StatusNotFound + status = http.StatusNotFound default: - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError } msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } // map[string]interface{} -> bytes content, 
err = json.MarshalIndent(fields, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type IndexHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandler { +func NewSetDocumentHandler(client *grpc.Client, logger *zap.Logger) *IndexHandler { return &IndexHandler{ client: client, logger: logger, @@ -156,13 +159,11 @@ func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandle } func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) // create documents docs := make([]map[string]interface{}, 0) @@ -172,18 +173,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -191,18 +193,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Indexing documents in bulk err := json.Unmarshal(bodyBytes, &docs) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } else { @@ -210,18 +213,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var fields map[string]interface{} err := json.Unmarshal(bodyBytes, &fields) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -236,18 +240,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // index documents in bulk count, err := h.client.IndexDocument(docs) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + 
h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -257,28 +262,31 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } content, err = json.MarshalIndent(msgMap, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type DeleteHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) *DeleteHandler { +func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, @@ -286,13 +294,11 @@ func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) *DeleteHa } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) // create documents ids := make([]string, 0) @@ -302,18 +308,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -321,18 +328,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Deleting documents in bulk err := json.Unmarshal(bodyBytes, &ids) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } else { @@ -343,18 +351,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // delete documents in bulk count, err := h.client.DeleteDocument(ids) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -364,28 +373,29 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } content, err = json.MarshalIndent(msgMap, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := 
map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } type SearchHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { +func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { return &SearchHandler{ client: client, logger: logger, @@ -393,28 +403,27 @@ func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { } func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) searchRequestBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -423,53 +432,58 @@ func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if len(searchRequestBytes) > 0 { err := json.Unmarshal(searchRequestBytes, searchRequest) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } searchResult, err := h.client.Search(searchRequest) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } content, err = json.MarshalIndent(&searchResult, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } diff --git a/dispatcher/server.go b/dispatcher/server.go index 5389b28..e1a7e19 100644 --- a/dispatcher/server.go +++ b/dispatcher/server.go @@ -15,11 +15,10 @@ package dispatcher import ( - "log" - accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/http" + "go.uber.org/zap" ) type Server struct { @@ -34,11 +33,11 @@ type Server struct { httpRouter *http.Router httpServer *http.Server - 
logger *log.Logger + logger *zap.Logger httpLogger accesslog.Logger } -func NewServer(managerAddr string, grpcAddr string, httpAddr string, logger *log.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerAddr string, grpcAddr string, httpAddr string, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ managerAddr: managerAddr, @@ -51,90 +50,84 @@ func NewServer(managerAddr string, grpcAddr string, httpAddr string, logger *log } func (s *Server) Start() { - s.logger.Printf("[INFO] start coordinator") - var err error // create gRPC service s.grpcService, err = NewGRPCService(s.managerAddr, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create gRPC server s.grpcServer, err = grpc.NewServer(s.grpcAddr, s.grpcService, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create HTTP router s.httpRouter, err = NewRouter(s.grpcAddr, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create HTTP server s.httpServer, err = http.NewServer(s.httpAddr, s.httpRouter, s.logger, s.httpLogger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // start gRPC service - s.logger.Print("[INFO] start gRPC service") + s.logger.Info("start gRPC service") go func() { err := s.grpcService.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start gRPC server - s.logger.Print("[INFO] start gRPC server") + s.logger.Info("start gRPC server") go func() { err := s.grpcServer.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start HTTP server - s.logger.Print("[INFO] start HTTP server") + s.logger.Info("start HTTP server") go func() { _ = s.httpServer.Start() }() } func (s *Server) Stop() { - // stop HTTP server - s.logger.Printf("[INFO] stop HTTP server") + s.logger.Info("stop HTTP server") err := s.httpServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop HTTP router err = s.httpRouter.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC server - s.logger.Printf("[INFO] stop gRPC server") + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC service - s.logger.Print("[INFO] stop gRPC service") + s.logger.Info("stop gRPC service") err = s.grpcService.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } diff --git a/go.mod b/go.mod index 0debc24..7b4859f 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 github.com/mosuka/logutils v0.1.2 + github.com/natefinch/lumberjack v2.0.0+incompatible github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pascaldekloe/goe v0.1.0 // indirect github.com/prometheus/client_golang v0.9.2 @@ -46,7 +47,7 @@ require ( github.com/urfave/cli v1.20.0 go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.10.0 // indirect + go.uber.org/zap v1.10.0 golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect 
google.golang.org/grpc v1.19.1 diff --git a/grpc/server.go b/grpc/server.go index 308c57a..e45576d 100644 --- a/grpc/server.go +++ b/grpc/server.go @@ -15,17 +15,17 @@ package grpc import ( - "log" "net" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" "google.golang.org/grpc" ) @@ -34,16 +34,16 @@ type Server struct { server *grpc.Server listener net.Listener - logger *log.Logger + logger *zap.Logger } -func NewServer(grpcAddr string, service protobuf.BlastServer, logger *log.Logger) (*Server, error) { +func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger) (*Server, error) { server := grpc.NewServer( grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( //grpc_ctxtags.StreamServerInterceptor(), //grpc_opentracing.StreamServerInterceptor(), grpc_prometheus.StreamServerInterceptor, - //grpc_zap.StreamServerInterceptor(zapLogger), + grpc_zap.StreamServerInterceptor(logger), //grpc_auth.StreamServerInterceptor(myAuthFunction), //grpc_recovery.StreamServerInterceptor(), )), @@ -51,7 +51,7 @@ func NewServer(grpcAddr string, service protobuf.BlastServer, logger *log.Logger //grpc_ctxtags.UnaryServerInterceptor(), //grpc_opentracing.UnaryServerInterceptor(), grpc_prometheus.UnaryServerInterceptor, - //grpc_zap.UnaryServerInterceptor(zapLogger), + grpc_zap.UnaryServerInterceptor(logger), //grpc_auth.UnaryServerInterceptor(myAuthFunction), //grpc_recovery.UnaryServerInterceptor(), )), @@ -76,7 +76,7 @@ func NewServer(grpcAddr string, service protobuf.BlastServer, logger *log.Logger } func (s *Server) Start() error { - s.logger.Print("[INFO] start server") + s.logger.Info("start server") err := s.server.Serve(s.listener) if err != nil { return err @@ -86,7 +86,7 @@ func (s *Server) Start() error { } func (s *Server) Stop() error { - s.logger.Print("[INFO] stop server") + s.logger.Info("stop server") s.server.Stop() // TODO: graceful stop return nil diff --git a/http/metric.go b/http/metric.go index 4ca3bad..09afbf5 100644 --- a/http/metric.go +++ b/http/metric.go @@ -14,100 +14,87 @@ package http -//import ( -// "log" -// "net/http" -// "strconv" -// "time" -// -// "github.com/prometheus/client_golang/prometheus" -//) -// -//var ( -// namespace = "blast" -// subsystem = "http" -// -// DurationSeconds = prometheus.NewHistogramVec( -// prometheus.HistogramOpts{ -// Namespace: namespace, -// Subsystem: subsystem, -// Name: "duration_seconds", -// Help: "The invocation duration in seconds.", -// }, -// []string{ -// "request_uri", -// }, -// ) -// -// RequestsTotal = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Namespace: namespace, -// Subsystem: subsystem, -// Name: "requests_total", -// Help: "The number of requests.", -// }, -// []string{ -// "request_uri", -// "method", -// }, -// ) -// -// ResponsesTotal = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Namespace: namespace, -// Subsystem: subsystem, -// Name: "responses_total", -// Help: "The number of responses.", -// }, -// []string{ -// 
"request_uri", -// "status", -// }, -// ) -// -// RequestsBytesTotal = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Namespace: namespace, -// Subsystem: subsystem, -// Name: "requests_bytes_total", -// Help: "A summary of the invocation requests bytes.", -// }, -// []string{ -// "request_uri", -// "method", -// }, -// ) -// -// ResponsesBytesTotal = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Namespace: namespace, -// Subsystem: subsystem, -// Name: "responses_bytes_total", -// Help: "A summary of the invocation responses bytes.", -// }, -// []string{ -// "request_uri", -// "method", -// }, -// ) -//) -// -//func init() { -// prometheus.MustRegister(DurationSeconds) -// prometheus.MustRegister(RequestsTotal) -// prometheus.MustRegister(ResponsesTotal) -// prometheus.MustRegister(RequestsBytesTotal) -// prometheus.MustRegister(ResponsesBytesTotal) -//} -// -//func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request, logger *log.Logger) { -// DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) -// RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Inc() -// ResponsesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "status": strconv.Itoa(status)}).Inc() -// RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Add(float64(request.ContentLength)) -// contentLength, err := strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) -// if err != nil { -// logger.Printf("[ERR] Failed to parse content length: %v", err) -// } -// ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "method": request.Method}).Add(contentLength) -//} +import ( + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + namespace = "http" + subsystem = "server" + + DurationSeconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "handling_seconds", + Help: "The invocation duration in seconds.", + }, + []string{ + "request_uri", + }, + ) + + RequestsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "handled_total", + Help: "The number of requests.", + }, + []string{ + "request_uri", + "http_method", + "http_status", + }, + ) + + RequestsBytesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "requests_received_bytes", + Help: "A summary of the invocation requests bytes.", + }, + []string{ + "request_uri", + "http_method", + }, + ) + + ResponsesBytesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "responses_sent_bytes", + Help: "A summary of the invocation responses bytes.", + }, + []string{ + "request_uri", + "http_method", + }, + ) +) + +func init() { + prometheus.MustRegister(DurationSeconds) + prometheus.MustRegister(RequestsTotal) + prometheus.MustRegister(RequestsBytesTotal) + prometheus.MustRegister(ResponsesBytesTotal) +} + +func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request) { + DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) + + RequestsTotal.With(prometheus.Labels{"request_uri": 
request.RequestURI, "http_method": request.Method, "http_status": strconv.Itoa(status)}).Inc() + + RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(float64(request.ContentLength)) + + contentLength, err := strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) + if err == nil { + ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(contentLength) + } +} diff --git a/http/response.go b/http/response.go index 9043d94..d51fdc2 100644 --- a/http/response.go +++ b/http/response.go @@ -16,9 +16,10 @@ package http import ( "encoding/json" - "log" "net/http" "strconv" + + "go.uber.org/zap" ) func NewJSONMessage(msgMap map[string]interface{}) ([]byte, error) { @@ -30,13 +31,13 @@ func NewJSONMessage(msgMap map[string]interface{}) ([]byte, error) { return content, nil } -func WriteResponse(w http.ResponseWriter, content []byte, status int, logger *log.Logger) { +func WriteResponse(w http.ResponseWriter, content []byte, status int, logger *zap.Logger) { w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Length", strconv.FormatInt(int64(len(content)), 10)) w.WriteHeader(status) _, err := w.Write(content) if err != nil { - logger.Printf("[ERR] handler: Failed to write content: %s", err.Error()) + logger.Error(err.Error()) } return diff --git a/http/router.go b/http/router.go index b2ea7fb..40a9a92 100644 --- a/http/router.go +++ b/http/router.go @@ -15,29 +15,30 @@ package http import ( - "log" - "github.com/gorilla/mux" "github.com/mosuka/blast/grpc" + "go.uber.org/zap" ) type Router struct { mux.Router GRPCClient *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewRouter(grpcAddr string, logger *log.Logger) (*Router, error) { +func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { grpcClient, err := grpc.NewClient(grpcAddr) if err != nil { return nil, err } - return &Router{ + router := &Router{ GRPCClient: grpcClient, logger: logger, - }, nil + } + + return router, nil } func (r *Router) Close() error { diff --git a/http/server.go b/http/server.go index 4e531c2..8cdb7cf 100644 --- a/http/server.go +++ b/http/server.go @@ -15,22 +15,22 @@ package http import ( - "log" "net" "net/http" accesslog "github.com/mash/go-accesslog" + "go.uber.org/zap" ) type Server struct { listener net.Listener router *Router - logger *log.Logger + logger *zap.Logger httpLogger accesslog.Logger } -func NewServer(httpAddr string, router *Router, logger *log.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { listener, err := net.Listen("tcp", httpAddr) if err != nil { return nil, err diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index d3a69c5..c270151 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -17,9 +17,7 @@ package indexer import ( "context" "errors" - "fmt" "io" - "log" "reflect" "sync" "time" @@ -31,6 +29,7 @@ import ( blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -42,23 +41,23 @@ type GRPCService struct { clusterId string raftServer *RaftServer - logger *log.Logger - - watchClusterStopCh chan struct{} - watchClusterDoneCh chan struct{} - peers map[string]interface{} - peerClients map[string]*grpc.Client - 
cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} - clusterMutex sync.RWMutex - - managers map[string]interface{} - managerClients map[string]*grpc.Client - watchManagersStopCh chan struct{} - watchManagersDoneCh chan struct{} + logger *zap.Logger + + updateClusterStopCh chan struct{} + updateClusterDoneCh chan struct{} + peers map[string]interface{} + peerClients map[string]*grpc.Client + cluster map[string]interface{} + clusterChans map[chan protobuf.GetClusterResponse]struct{} + clusterMutex sync.RWMutex + + managers map[string]interface{} + managerClients map[string]*grpc.Client + updateManagersStopCh chan struct{} + updateManagersDoneCh chan struct{} } -func NewGRPCService(managerAddr string, clusterId string, raftServer *RaftServer, logger *log.Logger) (*GRPCService, error) { +func NewGRPCService(managerAddr string, clusterId string, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ managerAddr: managerAddr, clusterId: clusterId, @@ -77,11 +76,11 @@ func NewGRPCService(managerAddr string, clusterId string, raftServer *RaftServer } func (s *GRPCService) Start() error { - s.logger.Print("[INFO] start watching cluster") + s.logger.Info("start to update cluster info") go s.startUpdateCluster(500 * time.Millisecond) if s.managerAddr != "" { - s.logger.Print("[INFO] start watching managers") + s.logger.Info("start to update manager cluster info") go s.startUpdateManagers(500 * time.Millisecond) } @@ -89,11 +88,11 @@ func (s *GRPCService) Start() error { } func (s *GRPCService) Stop() error { - s.logger.Print("[INFO] stop watching cluster") + s.logger.Info("stop to update cluster info") s.stopUpdateCluster() if s.managerAddr != "" { - s.logger.Print("[INFO] stop watching managers") + s.logger.Info("stop to update manager cluster info") s.stopUpdateManagers() } @@ -104,23 +103,34 @@ func (s *GRPCService) getManagerClient() (*grpc.Client, error) { var client *grpc.Client for id, node := range s.managers { - state := node.(map[string]interface{})["state"].(string) - if state != raft.Shutdown.String() { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } - if _, exist := s.managerClients[id]; exist { - client = s.managerClients[id] - break + state, ok := nm["state"].(string) + if !ok { + s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) + continue + } + + if state == raft.Leader.String() || state == raft.Follower.String() { + client, ok = s.managerClients[id] + if ok { + return client, nil } else { - s.logger.Printf("[DEBUG] %v does not exist", id) + s.logger.Error("node does not exist", zap.String("id", id)) } + } else { + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) } } - if client == nil { - return nil, errors.New("client does not exist") - } + err := errors.New("available client does not exist") + s.logger.Error(err.Error()) - return client, nil + return nil, err } func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { @@ -128,18 +138,18 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interfa defer func() { err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } return }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return nil, err } managers, err := client.GetCluster() if err != nil { - s.logger.Printf("[ERR] 
%v", err) + s.logger.Error(err.Error()) return nil, err } @@ -147,13 +157,11 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interfa } func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.logger.Printf("[INFO] start watching a cluster") - - s.watchManagersStopCh = make(chan struct{}) - s.watchManagersDoneCh = make(chan struct{}) + s.updateManagersStopCh = make(chan struct{}) + s.updateManagersDoneCh = make(chan struct{}) defer func() { - close(s.watchManagersDoneCh) + close(s.updateManagersDoneCh) }() var err error @@ -161,161 +169,221 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { // get initial managers s.managers, err = s.getInitialManagers(s.managerAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } - s.logger.Printf("[DEBUG] %v", s.managers) + s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) // create clients for managers for nodeId, node := range s.managers { - metadata := node.(map[string]interface{})["metadata"].(map[string]interface{}) + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", nodeId)) + continue + } - s.logger.Printf("[DEBUG] create client for %s", metadata["grpc_addr"].(string)) + metadata, ok := nm["metadata"].(map[string]interface{}) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("metadata", metadata)) + continue + } - client, err := grpc.NewClient(metadata["grpc_addr"].(string)) - if err != nil { - s.logger.Printf("[ERR] %v", err) + grpcAddr, ok := metadata["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) continue } - s.managerClients[nodeId] = client + + s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + client, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + } + if client != nil { + s.managerClients[nodeId] = client + } } for { select { - case <-s.watchManagersStopCh: - s.logger.Print("[DEBUG] receive request that stop watching a cluster") + case <-s.updateManagersStopCh: + s.logger.Info("received a request to stop updating a manager cluster") return default: - // get active client for manager client, err := s.getManagerClient() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) continue } - // create stream stream, err := client.WatchCluster() if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %v", err) - default: - s.logger.Printf("[ERR] %v", err) - } + s.logger.Error(err.Error()) continue } - // wait for receive cluster updates from stream - s.logger.Print("[DEBUG] wait for receive cluster updates from stream") + s.logger.Info("wait for receive a manager cluster updates from stream") resp, err := stream.Recv() if err == io.EOF { + s.logger.Info(err.Error()) continue } if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.Canceled: - s.logger.Printf("[DEBUG] %v", err) - default: - s.logger.Printf("[ERR] %v", err) - } + s.logger.Error(err.Error()) continue } // get current manager cluster - cluster, err := protobuf.MarshalAny(resp.Cluster) + managersIntr, err := protobuf.MarshalAny(resp.Cluster) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) 
continue } - if cluster == nil { - s.logger.Print("[ERR] nil") + if managersIntr == nil { + s.logger.Error("unexpected value") continue } - managers := *cluster.(*map[string]interface{}) + managers := *managersIntr.(*map[string]interface{}) - // compare previous manager with current manager if !reflect.DeepEqual(s.managers, managers) { - s.logger.Printf("[INFO] %v", managers) + // open clients + for id, metadata := range managers { + mm, ok := metadata.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } + + grpcAddr, ok := mm["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + continue + } - // close the client for left manager node - for id := range s.managers { - if _, managerExists := managers[id]; !managerExists { - if _, clientExists := s.managerClients[id]; clientExists { - client := s.managerClients[id] + client, exist := s.managerClients[id] + if exist { + s.logger.Debug("client has already exist in manager list", zap.String("id", id)) + + if client.GetAddress() != grpcAddr { + s.logger.Debug("gRPC address has been changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + + delete(s.managerClients, id) - s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) err = client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id)) } - delete(s.managerClients, id) + newClient, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + + if newClient != nil { + s.managerClients[id] = newClient + } + } else { + s.logger.Debug("gRPC address has not changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + } + } else { + s.logger.Debug("client does not exist in manager list", zap.String("id", id)) + + s.logger.Debug("create gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + newClient, err := grpc.NewClient(grpcAddr) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + if newClient != nil { + s.managerClients[id] = newClient + } + } + } + + // close nonexistent clients + for id, client := range s.managerClients { + if metadata, exist := managers[id]; !exist { + s.logger.Info("this client is no longer in use", zap.String("id", id), zap.Any("metadata", metadata)) + + s.logger.Debug("close client", zap.String("id", id), zap.String("address", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("address", client.GetAddress())) } + + s.logger.Debug("delete client", zap.String("id", id)) + delete(s.managerClients, id) } } // keep current manager cluster s.managers = managers + s.logger.Debug("managers", zap.Any("managers", s.managers)) } } } } func (s *GRPCService) stopUpdateManagers() { - // close clients - s.logger.Printf("[INFO] close manager clients") - for _, client := range s.managerClients { - s.logger.Printf("[DEBUG] close manager client for %s", client.GetAddress()) + s.logger.Info("close all manager clients") + for id, client := range s.managerClients { + s.logger.Debug("close manager client", zap.String("id", id), zap.String("address",
client.GetAddress())) err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } - // stop watching managers - if s.watchManagersStopCh != nil { - s.logger.Printf("[INFO] stop watching managers") - close(s.watchManagersStopCh) + if s.updateManagersStopCh != nil { + s.logger.Info("send a request to stop updating a manager cluster") + close(s.updateManagersStopCh) } - // wait for stop watching managers has done - s.logger.Printf("[INFO] wait for stop watching managers has done") - <-s.watchManagersDoneCh + s.logger.Info("wait for the manager cluster update to stop") + <-s.updateManagersDoneCh + s.logger.Info("the manager cluster update has been stopped") } func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { var client *grpc.Client for id, node := range s.cluster { - state := node.(map[string]interface{})["state"].(string) - if state != raft.Shutdown.String() { + nm, ok := node.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } - if _, exist := s.peerClients[id]; exist { - client = s.peerClients[id] - break + state, ok := nm["state"].(string) + if !ok { + s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) + continue + } + + if state == raft.Leader.String() { + client, ok = s.peerClients[id] + if ok { + return client, nil } else { - s.logger.Printf("[DEBUG] %v does not exist", id) + s.logger.Error("node does not exist", zap.String("id", id)) } + } else { + s.logger.Debug("not a leader", zap.String("id", id)) } } - if client == nil { - return nil, errors.New("client does not exist") - } + err := errors.New("available client does not exist") + s.logger.Error(err.Error()) - return client, nil + return nil, err } func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.watchClusterStopCh = make(chan struct{}) - s.watchClusterDoneCh = make(chan struct{}) - - s.logger.Printf("[INFO] start watching a cluster") + s.updateClusterStopCh = make(chan struct{}) + s.updateClusterDoneCh = make(chan struct{}) defer func() { - close(s.watchClusterDoneCh) + close(s.updateClusterDoneCh) }() ticker := time.NewTicker(checkInterval) @@ -323,14 +391,14 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { for { select { - case <-s.watchClusterStopCh: - s.logger.Print("[DEBUG] receive request that stop watching a cluster") + case <-s.updateClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") return case <-ticker.C: // get servers servers, err := s.raftServer.GetServers() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } @@ -342,96 +410,108 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { } } - // create and close clients for manager if !reflect.DeepEqual(s.peers, peers) { // create clients for id, metadata := range peers { - grpcAddr := metadata.(map[string]interface{})["grpc_addr"].(string) + mm, ok := metadata.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } + + grpcAddr, ok := mm["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + continue + } + + client, exist := s.peerClients[id] + if exist { + s.logger.Debug("client has already exist in peer list", zap.String("id", id)) - if _, clientExists := s.peerClients[id]; clientExists { - client := s.peerClients[id] if client.GetAddress() != grpcAddr { - 
s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) + s.logger.Debug("gRPC address has been changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + + delete(s.peerClients, id) + err = client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id)) } - s.logger.Printf("[DEBUG] create client for %s", grpcAddr) newClient, err := grpc.NewClient(grpcAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) } - if client != nil { - s.logger.Printf("[DEBUG] create client for %s", newClient.GetAddress()) + if newClient != nil { s.peerClients[id] = newClient } + } else { + s.logger.Debug("gRPC address has not changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) } } else { - s.logger.Printf("[DEBUG] create client for %s", grpcAddr) + s.logger.Debug("client does not exist in peer list", zap.String("id", id)) + + s.logger.Debug("create gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) newClient, err := grpc.NewClient(grpcAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + if newClient != nil { + s.peerClients[id] = newClient } - - s.peerClients[id] = newClient } } // close nonexistent clients - for id := range s.peers { - if _, peerExists := peers[id]; !peerExists { - if _, clientExists := s.peerClients[id]; clientExists { - client := s.peerClients[id] - - s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) - err = client.Close() - if err != nil { - s.logger.Printf("[ERR] %v", err) - } + for id, client := range s.peerClients { + if metadata, exist := peers[id]; !exist { + s.logger.Info("this client is no longer in use", zap.String("id", id), zap.Any("metadata", metadata)) - delete(s.peerClients, id) + s.logger.Debug("close client", zap.String("id", id), zap.String("address", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("address", client.GetAddress())) } + + s.logger.Debug("delete client", zap.String("id", id)) + delete(s.peerClients, id) } } // keep current peer nodes s.peers = peers + s.logger.Debug("peers", zap.Any("peers", s.peers)) } // get cluster + cluster := make(map[string]interface{}, 0) ctx, _ := grpc.NewContext() resp, err := s.GetCluster(ctx, &empty.Empty{}) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - clusterInter, err := protobuf.MarshalAny(resp.Cluster) - cluster := *clusterInter.(*map[string]interface{}) + clusterIntr, err := protobuf.MarshalAny(resp.Cluster) + if err != nil { + s.logger.Error(err.Error()) + } + if clusterIntr == nil { + s.logger.Error("unexpected value") + } + cluster = *clusterIntr.(*map[string]interface{}) + // notify current cluster if !reflect.DeepEqual(s.cluster, cluster) { - // notify cluster state for c := range s.clusterChans { + s.logger.Debug("notify cluster changes to client", zap.Any("response", resp)) c <- *resp } - // update cluster to manager - if s.raftServer.IsLeader() && s.managerAddr != "" { - // get active client for manager - client, err := s.getManagerClient() - if err != nil { - s.logger.Printf("[ERR] %v", err) - 
continue - } - s.logger.Printf("[DEBUG] update cluster state: %s %v", s.clusterId, cluster) - err = client.SetState(fmt.Sprintf("/cluster_config/clusters/%s/nodes", s.clusterId), cluster) - if err != nil { - continue - } - } - // keep current cluster s.cluster = cluster + s.logger.Debug("cluster", zap.Any("cluster", cluster)) } default: time.Sleep(100 * time.Millisecond) @@ -440,59 +520,67 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { } func (s *GRPCService) stopUpdateCluster() { - // close clients - s.logger.Printf("[INFO] close peer clients") - for _, client := range s.peerClients { - s.logger.Printf("[DEBUG] close peer client for %s", client.GetAddress()) + s.logger.Info("close all peer clients") + for id, client := range s.peerClients { + s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } - // stop watching peers - if s.watchClusterStopCh != nil { - s.logger.Printf("[INFO] stop watching peers") - close(s.watchClusterStopCh) + if s.updateClusterStopCh != nil { + s.logger.Info("send a request to stop updating a cluster") + close(s.updateClusterStopCh) } - // wait for stop watching peers has done - s.logger.Printf("[INFO] wait for stop watching peers has done") - <-s.watchClusterDoneCh + s.logger.Info("wait for the cluster update to stop") + <-s.updateClusterDoneCh + s.logger.Info("the cluster update has been stopped") } func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - metadata, err := s.raftServer.GetMetadata(s.raftServer.id) - if err != nil { - s.logger.Printf("[ERR] %v", err) - } + s.logger.Debug("get own node information") + + var node map[string]interface{} - node := map[string]interface{}{ - "metadata": metadata, - "state": s.raftServer.State(), + metadata, err := s.raftServer.GetMetadata(s.raftServer.id) + if err == nil { + node = map[string]interface{}{ + "metadata": metadata, + "state": s.raftServer.State(), + } + } else { + s.logger.Error(err.Error()) + node = map[string]interface{}{ + "metadata": map[string]interface{}{}, + "state": raft.Shutdown.String(), + } } return node, nil } func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { + s.logger.Debug("get peer node information", zap.String("id", id)) + var node map[string]interface{} var err error if client, exist := s.peerClients[id]; exist { node, err = client.GetNode(id) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) node = map[string]interface{}{ "metadata": map[string]interface{}{}, "state": raft.Shutdown.String(), } } } else { - s.logger.Printf("[ERR] %v does not exist", id) + s.logger.Error("node does not exist in peer list", zap.String("id", id)) node = map[string]interface{}{ "metadata": map[string]interface{}{}, - "state": "Gone", + "state": raft.Shutdown.String(), } } @@ -510,26 +598,25 @@ func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) node, err = s.getPeerNode(req.Id) } if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadataAny := &any.Any{} - state := "Gone" - if node != nil { - if _, exist := node["metadata"]; exist { - if node["metadata"] != nil { - err = protobuf.UnmarshalAny(node["metadata"].(map[string]interface{}), metadataAny) - if err != nil { - return resp, status.Error(codes.Internal, err.Error()) - } - } + if metadata, exist := node["metadata"]; exist 
{ + err = protobuf.UnmarshalAny(metadata.(map[string]interface{}), metadataAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } + } else { + s.logger.Error("missing metadata", zap.Any("metadata", metadata)) + } - if _, exist := node["state"]; exist { - if node["state"] != nil { - state = node["state"].(string) - } - } + state, exist := node["state"].(string) + if !exist { + s.logger.Error("missing node state", zap.String("state", state)) + state = raft.Shutdown.String() } resp.Metadata = metadataAny @@ -543,6 +630,7 @@ func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) ins, err := protobuf.MarshalAny(req.Metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -551,16 +639,19 @@ func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) if s.raftServer.IsLeader() { err = s.raftServer.SetMetadata(req.Id, metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.SetNode(req.Id, metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -574,16 +665,19 @@ func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRe if s.raftServer.IsLeader() { err := s.raftServer.DeleteMetadata(req.Id) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.DeleteNode(req.Id) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -596,6 +690,7 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob servers, err := s.raftServer.GetServers() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -603,11 +698,13 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob for id := range servers { nodeResp, err := s.GetNode(ctx, &protobuf.GetNodeRequest{Id: id}) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadataIntr, err := protobuf.MarshalAny(nodeResp.Metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadata := *metadataIntr.(*map[string]interface{}) @@ -623,6 +720,7 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob clusterAny := &any.Any{} err = protobuf.UnmarshalAny(cluster, clusterAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -648,6 +746,7 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch for resp := range chans { err := server.Send(&resp) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } @@ -660,6 +759,7 @@ func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Em err := s.raftServer.Snapshot() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -671,6 +771,7 @@ func (s 
*GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument fields, err := s.raftServer.GetDocument(req.Id) if err != nil { + s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: return resp, status.Error(codes.NotFound, err.Error()) @@ -682,6 +783,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument fieldsAny := &any.Any{} err = protobuf.UnmarshalAny(fields, fieldsAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -693,21 +795,22 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { resp := &protobuf.SearchResponse{} - // Any -> bleve.SearchRequest searchRequest, err := protobuf.MarshalAny(req.SearchRequest) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.InvalidArgument, err.Error()) } searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - // bleve.SearchResult -> Any searchResultAny := &any.Any{} err = protobuf.UnmarshalAny(searchResult, searchResultAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -721,16 +824,19 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e for { req, err := stream.Recv() - if err == io.EOF { - break - } if err != nil { + if err == io.EOF { + s.logger.Debug(err.Error()) + break + } + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } // fields ins, err := protobuf.MarshalAny(req.Fields) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } fields := *ins.(*map[string]interface{}) @@ -750,16 +856,19 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e if s.raftServer.IsLeader() { count, err = s.raftServer.IndexDocument(docs) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } count, err = client.IndexDocument(docs) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } @@ -776,10 +885,12 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) for { req, err := stream.Recv() - if err == io.EOF { - break - } if err != nil { + if err == io.EOF { + s.logger.Debug(err.Error()) + break + } + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } @@ -792,16 +903,19 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) if s.raftServer.IsLeader() { count, err = s.raftServer.DeleteDocument(ids) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } count, err = client.DeleteDocument(ids) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } @@ -818,12 +932,14 @@ func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*pr indexConfig, err := s.raftServer.GetIndexConfig() 
if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } indexConfigAny := &any.Any{} err = protobuf.UnmarshalAny(indexConfig, indexConfigAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -837,12 +953,14 @@ func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*pro indexStats, err := s.raftServer.GetIndexStats() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } indexStatsAny := &any.Any{} err = protobuf.UnmarshalAny(indexStats, indexStatsAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 12dbda1..984143d 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -17,8 +17,8 @@ package indexer import ( "encoding/json" "io/ioutil" - "log" "net/http" + "time" "github.com/blevesearch/bleve" "github.com/gorilla/mux" @@ -27,9 +27,10 @@ import ( blasthttp "github.com/mosuka/blast/http" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { +func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { router, err := blasthttp.NewRouter(grpcAddr, logger) if err != nil { return nil, err @@ -50,23 +51,21 @@ func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { } type RootHandler struct { - logger *log.Logger + logger *zap.Logger } -func NewRootHandler(logger *log.Logger) *RootHandler { +func NewRootHandler(logger *zap.Logger) *RootHandler { return &RootHandler{ logger: logger, } } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() + start := time.Now() status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, status, h.logger) - //blasthttp.RecordMetrics(start, status, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) msgMap := map[string]interface{}{ "version": version.Version, @@ -75,16 +74,18 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { content, err := blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + + blasthttp.WriteResponse(w, content, status, h.logger) } type GetHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler { +func NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, @@ -92,13 +93,11 @@ func NewGetDocumentHandler(client *grpc.Client, logger *log.Logger) *GetHandler } func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) vars := mux.Vars(r) @@ -108,49 +107,53 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { switch err { case errors.ErrNotFound: - httpStatus = http.StatusNotFound + status = 
http.StatusNotFound default: - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError } msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } // map[string]interface{} -> bytes content, err = json.MarshalIndent(fields, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type IndexHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandler { +func NewSetDocumentHandler(client *grpc.Client, logger *zap.Logger) *IndexHandler { return &IndexHandler{ client: client, logger: logger, @@ -158,13 +161,11 @@ func NewSetDocumentHandler(client *grpc.Client, logger *log.Logger) *IndexHandle } func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) // create documents docs := make([]map[string]interface{}, 0) @@ -174,18 +175,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -193,18 +195,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Indexing documents in bulk err := json.Unmarshal(bodyBytes, &docs) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } else { @@ -212,18 +215,19 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var fields map[string]interface{} err := json.Unmarshal(bodyBytes, &fields) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -238,18 +242,19 @@ func (h 
*IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // index documents in bulk count, err := h.client.IndexDocument(docs) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -259,28 +264,31 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } content, err = json.MarshalIndent(msgMap, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type DeleteHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) *DeleteHandler { +func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, @@ -288,13 +296,11 @@ func NewDeleteDocumentHandler(client *grpc.Client, logger *log.Logger) *DeleteHa } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) // create documents ids := make([]string, 0) @@ -304,18 +310,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -323,18 +330,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Deleting documents in bulk err := json.Unmarshal(bodyBytes, &ids) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } else { @@ -345,18 +353,19 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // delete documents in bulk count, err := h.client.DeleteDocument(ids) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = 
blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -366,28 +375,31 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } content, err = json.MarshalIndent(msgMap, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type SearchHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { +func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { return &SearchHandler{ client: client, logger: logger, @@ -395,28 +407,27 @@ func NewSearchHandler(client *grpc.Client, logger *log.Logger) *SearchHandler { } func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) searchRequestBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -425,54 +436,58 @@ func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if len(searchRequestBytes) > 0 { err := json.Unmarshal(searchRequestBytes, searchRequest) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } } searchResult, err := h.client.Search(searchRequest) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } content, err = json.MarshalIndent(&searchResult, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) 
return } + blasthttp.WriteResponse(w, content, status, h.logger) } diff --git a/indexer/index.go b/indexer/index.go index db71e78..c8de8ad 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -16,14 +16,13 @@ package indexer import ( "encoding/json" - "log" "os" - "time" "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" ) type Index struct { @@ -31,27 +30,53 @@ type Index struct { indexConfig map[string]interface{} - logger *log.Logger + logger *zap.Logger } -func NewIndex(dir string, indexConfig map[string]interface{}, logger *log.Logger) (*Index, error) { - bleve.SetLog(logger) +func NewIndex(dir string, indexConfig map[string]interface{}, logger *zap.Logger) (*Index, error) { + //bleve.SetLog(logger) var index bleve.Index _, err := os.Stat(dir) if os.IsNotExist(err) { - // create new index - indexMappingSrc, err := json.Marshal(indexConfig["index_mapping"]) - if err != nil { - return nil, err - } + // default index mapping indexMapping := bleve.NewIndexMapping() - err = json.Unmarshal(indexMappingSrc, indexMapping) - if err != nil { - return nil, err + + // index mapping from config + indexMappingIntr, ok := indexConfig["index_mapping"] + if ok { + if indexMappingIntr != nil { + indexMappingBytes, err := json.Marshal(indexMappingIntr) + if err != nil { + logger.Error(err.Error()) + return nil, err + } + err = json.Unmarshal(indexMappingBytes, indexMapping) + if err != nil { + logger.Error(err.Error()) + return nil, err + } + } + } else { + logger.Error("missing index mapping") + } + + indexType, ok := indexConfig["index_type"].(string) + if !ok { + logger.Error("missing index type") + indexType = bleve.Config.DefaultIndexType } - index, err = bleve.NewUsing(dir, indexMapping, indexConfig["index_type"].(string), indexConfig["index_storage_type"].(string), nil) + + indexStorageType, ok := indexConfig["index_storage_type"].(string) + if !ok { + logger.Error("missing index storage type") + indexStorageType = bleve.Config.DefaultKVStore + } + + // create new index + index, err = bleve.NewUsing(dir, indexMapping, indexType, indexStorageType, nil) if err != nil { + logger.Error(err.Error()) return nil, err } } else { @@ -61,6 +86,7 @@ func NewIndex(dir string, indexConfig map[string]interface{}, logger *log.Logger "error_if_exists": false, }) if err != nil { + logger.Error(err.Error()) return nil, err } } @@ -75,6 +101,7 @@ func NewIndex(dir string, indexConfig map[string]interface{}, logger *log.Logger func (i *Index) Close() error { err := i.index.Close() if err != nil { + i.logger.Error(err.Error()) return err } @@ -82,23 +109,20 @@ func (i *Index) Close() error { } func (i *Index) Get(id string) (map[string]interface{}, error) { - start := time.Now() - defer func() { - i.logger.Printf("[DEBUG] get %s %f", id, float64(time.Since(start))/float64(time.Second)) - }() - fieldsBytes, err := i.index.GetInternal([]byte(id)) if err != nil { + i.logger.Error(err.Error()) return nil, err } if len(fieldsBytes) <= 0 { + i.logger.Error(blasterrors.ErrNotFound.Error()) return nil, blasterrors.ErrNotFound } - // bytes -> map[string]interface{} var fieldsMap map[string]interface{} err = json.Unmarshal(fieldsBytes, &fieldsMap) if err != nil { + i.logger.Error(err.Error()) return nil, err } @@ -106,14 +130,9 @@ func (i *Index) Get(id string) (map[string]interface{}, error) { } func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - start := time.Now() - 
defer func() { - rb, _ := json.Marshal(request) - i.logger.Printf("[DEBUG] search %s %f", rb, float64(time.Since(start))/float64(time.Second)) - }() - result, err := i.index.Search(request) if err != nil { + i.logger.Error(err.Error()) return nil, err } @@ -121,28 +140,24 @@ func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error } func (i *Index) Index(id string, fields map[string]interface{}) error { - start := time.Now() - defer func() { - i.logger.Printf("[DEBUG] index %s %v %f", id, fields, float64(time.Since(start))/float64(time.Second)) - }() - // index - i.logger.Printf("[DEBUG] index %s, %v", id, fields) err := i.index.Index(id, fields) if err != nil { + i.logger.Error(err.Error()) return err } - i.logger.Printf("[DEBUG] indexed %s, %v", id, fields) // map[string]interface{} -> bytes fieldsBytes, err := json.Marshal(fields) if err != nil { + i.logger.Error(err.Error()) return err } // set original document err = i.index.SetInternal([]byte(id), fieldsBytes) if err != nil { + i.logger.Error(err.Error()) return err } @@ -150,19 +165,16 @@ func (i *Index) Index(id string, fields map[string]interface{}) error { } func (i *Index) Delete(id string) error { - start := time.Now() - defer func() { - i.logger.Printf("[DEBUG] delete %s %f", id, float64(time.Since(start))/float64(time.Second)) - }() - err := i.index.Delete(id) if err != nil { + i.logger.Error(err.Error()) return err } // delete original document err = i.index.SetInternal([]byte(id), nil) if err != nil { + i.logger.Error(err.Error()) return err } @@ -170,23 +182,11 @@ func (i *Index) Delete(id string) error { } func (i *Index) Config() (map[string]interface{}, error) { - start := time.Now() - defer func() { - i.logger.Printf("[DEBUG] stats %f", float64(time.Since(start))/float64(time.Second)) - }() - return i.indexConfig, nil } func (i *Index) Stats() (map[string]interface{}, error) { - start := time.Now() - defer func() { - i.logger.Printf("[DEBUG] stats %f", float64(time.Since(start))/float64(time.Second)) - }() - - stats := i.index.StatsMap() - - return stats, nil + return i.index.StatsMap(), nil } func (i *Index) SnapshotItems() <-chan *protobuf.Document { @@ -195,13 +195,13 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { go func() { idx, _, err := i.index.Advanced() if err != nil { - i.logger.Printf("[ERR] %v", err) + i.logger.Error(err.Error()) return } r, err := idx.Reader() if err != nil { - i.logger.Printf("[ERR] %v", err) + i.logger.Error(err.Error()) return } @@ -210,15 +210,15 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { dr, err := r.DocIDReaderAll() for { if dr == nil { - i.logger.Printf("[ERR] %v", err) + i.logger.Error(err.Error()) break } id, err := dr.Next() if id == nil { - i.logger.Print("[DEBUG] finished to read all document ids") + i.logger.Debug("finished to read all document ids") break } else if err != nil { - i.logger.Printf("[WARN] %v", err) + i.logger.Warn(err.Error()) continue } @@ -229,16 +229,15 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { var fieldsMap map[string]interface{} err = json.Unmarshal([]byte(fieldsBytes), &fieldsMap) if err != nil { - i.logger.Printf("[ERR] %v", err) + i.logger.Error(err.Error()) break } - i.logger.Printf("[DEBUG] %v", fieldsMap) // map[string]interface{} -> Any fieldsAny := &any.Any{} err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) if err != nil { - i.logger.Printf("[ERR] %v", err) + i.logger.Error(err.Error()) break } @@ -252,10 +251,10 @@ func (i *Index) SnapshotItems() <-chan 
*protobuf.Document { docCount = docCount + 1 } - i.logger.Print("[DEBUG] finished to write all documents to channel") + i.logger.Debug("finished to write all documents to channel") ch <- nil - i.logger.Printf("[INFO] snapshot total %d documents", docCount) + i.logger.Info("finished to snapshot", zap.Int("count", docCount)) return }() diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 123cc33..0bc91b5 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -19,7 +19,6 @@ import ( "errors" "io" "io/ioutil" - "log" "sync" "github.com/blevesearch/bleve" @@ -27,6 +26,7 @@ import ( "github.com/hashicorp/raft" "github.com/mosuka/blast/maputils" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" ) type RaftFSM struct { @@ -39,10 +39,10 @@ type RaftFSM struct { indexConfig map[string]interface{} - logger *log.Logger + logger *zap.Logger } -func NewRaftFSM(path string, indexConfig map[string]interface{}, logger *log.Logger) (*RaftFSM, error) { +func NewRaftFSM(path string, indexConfig map[string]interface{}, logger *zap.Logger) (*RaftFSM, error) { return &RaftFSM{ path: path, indexConfig: indexConfig, @@ -57,6 +57,7 @@ func (f *RaftFSM) Start() error { f.index, err = NewIndex(f.path, f.indexConfig, f.logger) if err != nil { + f.logger.Error(err.Error()) return err } @@ -66,6 +67,7 @@ func (f *RaftFSM) Start() error { func (f *RaftFSM) Stop() error { err := f.index.Close() if err != nil { + f.logger.Error(err.Error()) return err } @@ -78,6 +80,7 @@ func (f *RaftFSM) GetMetadata(id string) (map[string]interface{}, error) { value, err := f.metadata.Get(id) if err != nil { + f.logger.Error(err.Error()) return nil, err } @@ -90,6 +93,7 @@ func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) inte err := f.metadata.Merge(id, value) if err != nil { + f.logger.Error(err.Error()) return err } @@ -102,6 +106,7 @@ func (f *RaftFSM) applyDeleteMetadata(id string) interface{} { err := f.metadata.Delete(id) if err != nil { + f.logger.Error(err.Error()) return err } @@ -109,8 +114,11 @@ func (f *RaftFSM) applyDeleteMetadata(id string) interface{} { } func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { + f.logger.Debug("get a document", zap.String("id", id)) + fields, err := f.index.Get(id) if err != nil { + f.logger.Error(err.Error()) return nil, err } @@ -118,11 +126,11 @@ func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { } func (f *RaftFSM) applyIndexDocument(id string, fields map[string]interface{}) interface{} { - f.logger.Printf("[DEBUG] index %s, %v", id, fields) + f.logger.Debug("apply to index a document", zap.String("id", id), zap.Any("fields", fields)) err := f.index.Index(id, fields) if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) return err } @@ -130,9 +138,11 @@ func (f *RaftFSM) applyIndexDocument(id string, fields map[string]interface{}) i } func (f *RaftFSM) applyDeleteDocument(id string) interface{} { + f.logger.Debug("apply to delete a document", zap.String("id", id)) + err := f.index.Delete(id) if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) return err } @@ -140,8 +150,11 @@ func (f *RaftFSM) applyDeleteDocument(id string) interface{} { } func (f *RaftFSM) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { + f.logger.Debug("search documents") + result, err := f.index.Search(request) if err != nil { + f.logger.Error(err.Error()) return nil, err } @@ -149,19 +162,20 @@ func (f *RaftFSM) Search(request *bleve.SearchRequest) 
(*bleve.SearchResult, err } func (f *RaftFSM) Apply(l *raft.Log) interface{} { + f.logger.Debug("apply a message") + var msg message err := json.Unmarshal(l.Data, &msg) if err != nil { return err } - f.logger.Printf("[DEBUG] Apply %v", msg) - switch msg.Command { case setNode: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) @@ -169,6 +183,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applyDeleteMetadata(data["id"].(string)) @@ -176,6 +191,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applyIndexDocument(data["id"].(string), data["fields"].(map[string]interface{})) @@ -183,11 +199,14 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data string err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applyDeleteDocument(data) default: - return errors.New("command type not support") + err = errors.New("command type not support") + f.logger.Error(err.Error()) + return err } } @@ -210,13 +229,13 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { defer func() { err := rc.Close() if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) } }() data, err := ioutil.ReadAll(rc) if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) return err } @@ -230,30 +249,31 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { break } if err != nil { - f.logger.Printf("[ERR] %v", err) - return err + f.logger.Error(err.Error()) + continue } fields, err := protobuf.MarshalAny(doc.Fields) if err != nil { - return err + f.logger.Error(err.Error()) + continue } if fields == nil { - return nil + f.logger.Error("value is nil") + continue } fieldsMap := *fields.(*map[string]interface{}) err = f.index.Index(doc.Id, fieldsMap) if err != nil { - f.logger.Printf("[ERR] %v", err) - return err + f.logger.Error(err.Error()) + continue } - f.logger.Printf("[DEBUG] restore %v %v", doc.Id, doc.Fields) docCount = docCount + 1 } - f.logger.Printf("[INFO] %d documents were restored", docCount) + f.logger.Info("restore", zap.Int("count", docCount)) return nil } @@ -262,14 +282,14 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { type IndexFSMSnapshot struct { index *Index - logger *log.Logger + logger *zap.Logger } func (f *IndexFSMSnapshot) Persist(sink raft.SnapshotSink) error { defer func() { err := sink.Close() if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) } }() @@ -283,22 +303,26 @@ func (f *IndexFSMSnapshot) Persist(sink raft.SnapshotSink) error { break } - docCount = docCount + 1 docBytes, err := json.Marshal(doc) if err != nil { - return err + f.logger.Error(err.Error()) + continue } _, err = sink.Write(docBytes) if err != nil { - return err + f.logger.Error(err.Error()) + continue } + + docCount = docCount + 1 } - f.logger.Printf("[INFO] %d documents were persisted", docCount) + + f.logger.Info("persist", zap.Int("count", docCount)) return nil } func (f *IndexFSMSnapshot) Release() { - f.logger.Printf("[INFO] release") + f.logger.Info("release") } diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 
d88cf52..ae82931 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -16,7 +16,8 @@ package indexer import ( "encoding/json" - "log" + "errors" + "io/ioutil" "net" "path/filepath" "time" @@ -25,7 +26,8 @@ import ( "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" _ "github.com/mosuka/blast/config" - "github.com/mosuka/blast/errors" + blasterrors "github.com/mosuka/blast/errors" + "go.uber.org/zap" ) type RaftServer struct { @@ -39,10 +41,10 @@ type RaftServer struct { indexConfig map[string]interface{} - logger *log.Logger + logger *zap.Logger } -func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, indexConfig map[string]interface{}, logger *log.Logger) (*RaftServer, error) { +func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, indexConfig map[string]interface{}, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ id: id, metadata: metadata, @@ -57,53 +59,77 @@ func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, i func (s *RaftServer) Start() error { var err error - s.logger.Print("[INFO] create finite state machine") - s.fsm, err = NewRaftFSM(filepath.Join(s.metadata["data_dir"].(string), "index"), s.indexConfig, s.logger) + dataDir, ok := s.metadata["data_dir"].(string) + if !ok { + s.logger.Fatal("missing metadata", zap.String("data_dir", dataDir)) + return errors.New("missing metadata") + } + + bindAddr, ok := s.metadata["bind_addr"].(string) + if !ok { + s.logger.Fatal("missing metadata", zap.String("bind_addr", bindAddr)) + return errors.New("missing metadata") + } + + fsmPath := filepath.Join(dataDir, "index") + s.logger.Info("create finite state machine", zap.String("path", fsmPath)) + s.fsm, err = NewRaftFSM(fsmPath, s.indexConfig, s.logger) if err != nil { + s.logger.Fatal(err.Error()) return err } - s.logger.Print("[INFO] start finite state machine") + s.logger.Info("start finite state machine") err = s.fsm.Start() if err != nil { + s.logger.Fatal(err.Error()) return err } config := raft.DefaultConfig() config.LocalID = raft.ServerID(s.id) config.SnapshotThreshold = 1024 - config.Logger = s.logger + config.LogOutput = ioutil.Discard - addr, err := net.ResolveTCPAddr("tcp", s.metadata["bind_addr"].(string)) + s.logger.Info("resolve TCP address", zap.String("address", bindAddr)) + addr, err := net.ResolveTCPAddr("tcp", bindAddr) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create transport - transport, err := raft.NewTCPTransportWithLogger(s.metadata["bind_addr"].(string), addr, 3, 10*time.Second, s.logger) + s.logger.Info("create TCP transport", zap.String("bind_addr", bindAddr)) + transport, err := raft.NewTCPTransport(bindAddr, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create snapshot store - snapshotStore, err := raft.NewFileSnapshotStoreWithLogger(s.metadata["data_dir"].(string), 2, s.logger) + snapshotPath := filepath.Join(dataDir, "snapshots") + s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) + snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create raft log store - raftLogStore, err := raftboltdb.NewBoltStore(filepath.Join(s.metadata["data_dir"].(string), "raft.db")) + logStore := filepath.Join(dataDir, "raft.db") + s.logger.Info("create Raft log store", zap.String("path", logStore)) + raftLogStore, err := raftboltdb.NewBoltStore(logStore) if 
err != nil { + s.logger.Fatal(err.Error()) return err } - // create raft + s.logger.Info("create Raft machine") s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftLogStore, snapshotStore, transport) if err != nil { + s.logger.Fatal(err.Error()) return err } if s.bootstrap { + s.logger.Info("configure Raft machine as bootstrap") configuration := raft.Configuration{ Servers: []raft.Server{ { @@ -114,22 +140,18 @@ func (s *RaftServer) Start() error { } s.raft.BootstrapCluster(configuration) - // wait for detect a leader + s.logger.Info("wait for become a leader") err = s.WaitForDetectLeader(60 * time.Second) if err != nil { - if err == errors.ErrTimeout { - s.logger.Printf("[WARN] %v", err) - } else { - s.logger.Printf("[ERR] %v", err) - return err - } + s.logger.Fatal(err.Error()) + return err } // set metadata - s.logger.Print("[INFO] register itself in a cluster") + s.logger.Info("register its own information", zap.String("id", s.id), zap.Any("metadata", s.metadata)) err = s.setMetadata(s.id, s.metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return nil } } @@ -138,16 +160,18 @@ func (s *RaftServer) Start() error { } func (s *RaftServer) Stop() error { - s.logger.Print("[INFO] shutdown Raft") + s.logger.Info("shutdown Raft machine") f := s.raft.Shutdown() err := f.Error() if err != nil { + s.logger.Error(err.Error()) return err } - s.logger.Print("[INFO] stop finite state machine") + s.logger.Info("stop finite state machine") err = s.fsm.Stop() if err != nil { + s.logger.Error(err.Error()) return err } @@ -157,6 +181,7 @@ func (s *RaftServer) Stop() error { func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() + timer := time.NewTimer(timeout) defer timer.Stop() @@ -165,10 +190,12 @@ func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, e case <-ticker.C: leaderAddr := s.raft.Leader() if leaderAddr != "" { + s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) return leaderAddr, nil } case <-timer.C: - return "", errors.ErrTimeout + s.logger.Error("timeout exceeded") + return "", blasterrors.ErrTimeout } } } @@ -176,12 +203,14 @@ func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, e func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { leaderAddr, err := s.LeaderAddress(timeout) if err != nil { + s.logger.Error(err.Error()) return "", err } cf := s.raft.GetConfiguration() err = cf.Error() if err != nil { + s.logger.Error(err.Error()) return "", err } @@ -191,7 +220,8 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } } - return "", errors.ErrNotFoundLeader + s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) + return "", blasterrors.ErrNotFoundLeader } func (s *RaftServer) Stats() map[string]string { @@ -209,6 +239,7 @@ func (s *RaftServer) IsLeader() bool { func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { _, err := s.LeaderAddress(timeout) if err != nil { + s.logger.Error(err.Error()) return err } @@ -218,6 +249,7 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { func (s *RaftServer) getMetadata(id string) (map[string]interface{}, error) { metadata, err := s.fsm.GetMetadata(id) if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -225,6 +257,8 @@ func (s *RaftServer) getMetadata(id string) (map[string]interface{}, error) { } func (s 
*RaftServer) setMetadata(id string, metadata map[string]interface{}) error { + s.logger.Debug("set metadata", zap.String("id", id), zap.Any("metadata", metadata)) + msg, err := newMessage( setNode, map[string]interface{}{ @@ -233,17 +267,20 @@ func (s *RaftServer) setMetadata(id string, metadata map[string]interface{}) err }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -251,6 +288,8 @@ func (s *RaftServer) setMetadata(id string, metadata map[string]interface{}) err } func (s *RaftServer) deleteMetadata(id string) error { + s.logger.Debug("delete metadata", zap.String("id", id)) + msg, err := newMessage( deleteNode, map[string]interface{}{ @@ -258,17 +297,20 @@ func (s *RaftServer) deleteMetadata(id string) error { }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -276,9 +318,12 @@ func (s *RaftServer) deleteMetadata(id string) error { } func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { + s.logger.Debug("get metadata", zap.String("id", id)) + cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -287,6 +332,7 @@ func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { if server.ID == raft.ServerID(id) { metadata, err = s.getMetadata(id) if err != nil { + s.logger.Error(err.Error()) return nil, err } break @@ -297,72 +343,85 @@ func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { } func (s *RaftServer) SetMetadata(id string, metadata map[string]interface{}) error { + s.logger.Info("set metadata", zap.String("id", id), zap.Any("metadata", metadata)) + if !s.IsLeader() { + s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return err } for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - s.logger.Printf("[INFO] node %v already joined the cluster", id) + s.logger.Info("node already joined the cluster", zap.String("id", id)) return nil } } - f := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(metadata["bind_addr"].(string)), 0, 0) + bindAddr, ok := metadata["bind_addr"].(string) + if !ok { + s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) + return errors.New("missing metadata") + } + + s.logger.Info("add voter", zap.String("id", id), zap.String("address", bindAddr)) + f := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(bindAddr), 0, 0) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } // set metadata err = s.setMetadata(id, metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Error(err.Error()) + return err } - s.logger.Printf("[INFO] node %v joined successfully", id) return nil } func (s *RaftServer) DeleteMetadata(id string) error { + s.logger.Info("delete metadata", zap.String("id", id)) + if !s.IsLeader() { + s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader 
} cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return err } for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { + s.logger.Debug("remove server", zap.String("id", id)) f := s.raft.RemoveServer(server.ID, 0, 0) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } - - s.logger.Printf("[INFO] node %v leaved successfully", id) - return nil } } // delete metadata err = s.deleteMetadata(id) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Error(err.Error()) + return err } - s.logger.Printf("[INFO] node %v does not exists in the cluster", id) return nil } @@ -370,6 +429,7 @@ func (s *RaftServer) GetServers() (map[string]interface{}, error) { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -377,10 +437,8 @@ func (s *RaftServer) GetServers() (map[string]interface{}, error) { for _, server := range cf.Configuration().Servers { metadata, err := s.GetMetadata(string(server.ID)) if err != nil { - s.logger.Printf("[DEBUG] %v", err) - continue + s.logger.Warn(err.Error()) } - servers[string(server.ID)] = metadata } @@ -391,6 +449,7 @@ func (s *RaftServer) Snapshot() error { f := s.raft.Snapshot() err := f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -400,6 +459,7 @@ func (s *RaftServer) Snapshot() error { func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { fields, err := s.fsm.GetDocument(id) if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -409,6 +469,7 @@ func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { result, err := s.fsm.Search(request) if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -416,7 +477,8 @@ func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, } func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { - if s.raft.State() != raft.Leader { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader } @@ -427,17 +489,20 @@ func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { doc, ) if err != nil { + s.logger.Error(err.Error()) return -1, err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return -1, err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return -1, err } @@ -448,7 +513,8 @@ func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { } func (s *RaftServer) DeleteDocument(ids []string) (int, error) { - if s.raft.State() != raft.Leader { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader } @@ -459,17 +525,20 @@ func (s *RaftServer) DeleteDocument(ids []string) (int, error) { id, ) if err != nil { + s.logger.Error(err.Error()) return -1, err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return -1, err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return -1, err } @@ -482,6 +551,7 @@ func (s *RaftServer) DeleteDocument(ids []string) (int, error) { func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { 
indexConfig, err := s.fsm.GetIndexConfig() if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -491,6 +561,7 @@ func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { func (s *RaftServer) GetIndexStats() (map[string]interface{}, error) { indexStats, err := s.fsm.GetIndexStats() if err != nil { + s.logger.Error(err.Error()) return nil, err } diff --git a/indexer/server.go b/indexer/server.go index f8b5af3..ce345d4 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -16,13 +16,13 @@ package indexer import ( "fmt" - "log" accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/http" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" ) type Server struct { @@ -42,11 +42,11 @@ type Server struct { httpRouter *http.Router httpServer *http.Server - logger *log.Logger + logger *zap.Logger httpLogger accesslog.Logger } -func NewServer(managerAddr string, clusterId string, id string, metadata map[string]interface{}, peerAddr string, indexConfig map[string]interface{}, logger *log.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerAddr string, clusterId string, id string, metadata map[string]interface{}, peerAddr string, indexConfig map[string]interface{}, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ managerAddr: managerAddr, clusterId: clusterId, @@ -66,76 +66,82 @@ func NewServer(managerAddr string, clusterId string, id string, metadata map[str func (s *Server) Start() { // get peer from manager if s.managerAddr != "" { - s.logger.Printf("[INFO] connect to master %s", s.managerAddr) + s.logger.Info("connect to master", zap.String("master_addr", s.managerAddr)) mc, err := grpc.NewClient(s.managerAddr) defer func() { - s.logger.Printf("[DEBUG] close client for %v", mc.GetAddress()) + s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } - s.logger.Printf("[INFO] get nodes in cluster: %s", s.clusterId) + s.logger.Info("get nodes in cluster from master", zap.String("master_addr", mc.GetAddress()), zap.String("cluster", s.clusterId)) clusterIntr, err := mc.GetState(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterId)) - if err == errors.ErrNotFound { - // cluster does not found - s.logger.Printf("[INFO] cluster does not found: %s", s.clusterId) - } else if err != nil { - s.logger.Printf("[ERR] %v", err) + if err != nil && err != errors.ErrNotFound { + s.logger.Fatal(err.Error()) return - } else { - if clusterIntr == nil { - s.logger.Print("[INFO] value is nil") - } else { - cluster := *clusterIntr.(*map[string]interface{}) - for nodeId, nodeIntr := range cluster { - // skip if it is own node id - if nodeId == s.id { - continue - } - - // get the peer node address - metadata := nodeIntr.(map[string]interface{})["metadata"].(map[string]interface{}) - s.peerAddr = metadata["grpc_addr"].(string) - - s.logger.Printf("[INFO] peer node detected: %s", s.peerAddr) - - break + } + if clusterIntr != nil { + cluster := *clusterIntr.(*map[string]interface{}) + for nodeId, nodeIntr := range cluster { + if nodeId == s.id { + s.logger.Debug("skip own node id", zap.String("id", nodeId)) + continue + } + + // get the peer node address + metadata, ok := 
nodeIntr.(map[string]interface{})["metadata"].(map[string]interface{}) + if !ok { + s.logger.Error("missing metadata", zap.String("id", nodeId), zap.Any("metadata", metadata)) + continue } + + grpcAddr, ok := metadata["grpc_addr"].(string) + if !ok { + s.logger.Error("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + continue + } + + s.peerAddr = grpcAddr + + s.logger.Info("peer node detected", zap.String("peer_addr", s.peerAddr)) + + break } } } // bootstrap node? bootstrap := s.peerAddr == "" - s.logger.Printf("[INFO] bootstrap: %v", bootstrap) + s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) // get index config from manager or peer if s.managerAddr != "" { mc, err := grpc.NewClient(s.managerAddr) defer func() { - s.logger.Printf("[DEBUG] close client for %v", mc.GetAddress()) + s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } + s.logger.Debug("pull index config from master", zap.String("address", mc.GetAddress())) value, err := mc.GetState("index_config") if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } @@ -145,26 +151,28 @@ func (s *Server) Start() { } else if s.peerAddr != "" { pc, err := grpc.NewClient(s.peerAddr) defer func() { + s.logger.Debug("close client", zap.String("address", pc.GetAddress())) err = pc.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } + s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) resp, err := pc.GetIndexConfig() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } ins, err := protobuf.MarshalAny(resp.IndexConfig) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } @@ -176,68 +184,80 @@ func (s *Server) Start() { // create raft server s.raftServer, err = NewRaftServer(s.id, s.metadata, bootstrap, s.indexConfig, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create gRPC service s.grpcService, err = NewGRPCService(s.managerAddr, s.clusterId, s.raftServer, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) + return + } + + grpcAddr, ok := s.metadata["grpc_addr"].(string) + if !ok { + s.logger.Fatal("missing gRPC address", zap.String("grpc_addr", grpcAddr)) return } // create gRPC server - s.grpcServer, err = grpc.NewServer(s.metadata["grpc_addr"].(string), s.grpcService, s.logger) + s.grpcServer, err = grpc.NewServer(grpcAddr, s.grpcService, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create HTTP router - s.httpRouter, err = NewRouter(s.metadata["grpc_addr"].(string), s.logger) + s.httpRouter, err = NewRouter(grpcAddr, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) + return + } + + httpAddr, ok := s.metadata["http_addr"].(string) + if !ok { + s.logger.Fatal("missing HTTP address", zap.String("http_addr", httpAddr)) return } // create HTTP server - s.httpServer, err = http.NewServer(s.metadata["http_addr"].(string), s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = http.NewServer(httpAddr, 
s.httpRouter, s.logger, s.httpLogger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // start Raft server - s.logger.Print("[INFO] start Raft server") + s.logger.Info("start Raft server") err = s.raftServer.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // start gRPC service - s.logger.Print("[INFO] start gRPC service") + s.logger.Info("start gRPC service") go func() { err := s.grpcService.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start gRPC server - s.logger.Print("[INFO] start gRPC server") + s.logger.Info("start gRPC server") go func() { err := s.grpcServer.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start HTTP server - s.logger.Print("[INFO] start HTTP server") + s.logger.Info("start HTTP server") go func() { _ = s.httpServer.Start() }() @@ -248,54 +268,49 @@ func (s *Server) Start() { defer func() { err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } err = client.SetNode(s.id, s.metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } } } func (s *Server) Stop() { - // stop HTTP server - s.logger.Printf("[INFO] stop HTTP server") + s.logger.Info("stop HTTP server") err := s.httpServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop HTTP router err = s.httpRouter.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC server - s.logger.Printf("[INFO] stop gRPC server") + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC service - s.logger.Print("[INFO] stop gRPC service") + s.logger.Info("stop gRPC service") err = s.grpcService.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop Raft server - s.logger.Printf("[INFO] stop Raft server") + s.logger.Info("stop Raft server") err = s.raftServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } diff --git a/logutils/http_logger.go b/logutils/http_logger.go new file mode 100644 index 0000000..bb4371f --- /dev/null +++ b/logutils/http_logger.go @@ -0,0 +1,90 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logutils + +import ( + "io" + "log" + "os" + "strconv" + + accesslog "github.com/mash/go-accesslog" + "github.com/natefinch/lumberjack" +) + +func NewFileWriter(filename string, maxSize int, maxBackups int, maxAge int, compress bool) io.Writer { + var writer io.Writer + + switch filename { + case "", os.Stderr.Name(): + writer = os.Stderr + case os.Stdout.Name(): + writer = os.Stdout + default: + writer = &lumberjack.Logger{ + Filename: filename, + MaxSize: maxSize, // megabytes + MaxBackups: maxBackups, + MaxAge: maxAge, // days + Compress: compress, // disabled by default + } + } + + return writer +} + +type ApacheCombinedLogger struct { + logger *log.Logger +} + +func NewApacheCombinedLogger(filename string, maxSize int, maxBackups int, maxAge int, compress bool) *ApacheCombinedLogger { + writer := NewFileWriter(filename, maxSize, maxBackups, maxAge, compress) + return &ApacheCombinedLogger{ + logger: log.New(writer, "", 0), + } +} + +func (l ApacheCombinedLogger) Log(record accesslog.LogRecord) { + // Output the log record in Apache combined log format. + size := "-" + if record.Size > 0 { + size = strconv.FormatInt(record.Size, 10) + } + + referer := "-" + if record.RequestHeader.Get("Referer") != "" { + referer = record.RequestHeader.Get("Referer") + } + + userAgent := "-" + if record.RequestHeader.Get("User-Agent") != "" { + userAgent = record.RequestHeader.Get("User-Agent") + } + + l.logger.Printf( + "%s - %s [%s] \"%s %s %s\" %d %s \"%s\" \"%s\" %.4f", + record.Ip, + record.Username, + record.Time.Format("02/Jan/2006:15:04:05 -0700"), + record.Method, + record.Uri, + record.Protocol, + record.Status, + size, + referer, + userAgent, + record.ElapsedTime.Seconds(), + ) +} diff --git a/logutils/logger.go b/logutils/logger.go new file mode 100644 index 0000000..28611dd --- /dev/null +++ b/logutils/logger.go @@ -0,0 +1,80 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logutils + +import ( + "os" + + "github.com/natefinch/lumberjack" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackups int, logMaxAge int, logCompress bool) *zap.Logger { + var ll zapcore.Level + switch logLevel { + case "DEBUG": + ll = zap.DebugLevel + case "INFO": + ll = zap.InfoLevel + case "WARN", "WARNING": + ll = zap.WarnLevel + case "ERR", "ERROR": + ll = zap.ErrorLevel + case "DPANIC": + ll = zap.DPanicLevel + case "PANIC": + ll = zap.PanicLevel + case "FATAL": + ll = zap.FatalLevel + } + + var ws zapcore.WriteSyncer + if logFilename == "" { + ws = zapcore.AddSync(os.Stderr) + } else { + ws = zapcore.AddSync( + &lumberjack.Logger{ + Filename: logFilename, + MaxSize: logMaxSize, // megabytes + MaxBackups: logMaxBackups, + MaxAge: logMaxAge, // days + Compress: logCompress, + }, + ) + } + + ec := zap.NewProductionEncoderConfig() + ec.TimeKey = "_timestamp_" + ec.LevelKey = "_level_" + ec.NameKey = "_name_" + ec.CallerKey = "_caller_" + ec.MessageKey = "_message_" + ec.StacktraceKey = "_stacktrace_" + ec.EncodeTime = zapcore.ISO8601TimeEncoder + ec.EncodeCaller = zapcore.ShortCallerEncoder + + logger := zap.New( + zapcore.NewCore( + zapcore.NewJSONEncoder(ec), + ws, + ll, + ), + zap.AddCaller(), + //zap.AddStacktrace(ll), + ) + + return logger +} diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 23d761e..c51998d 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -17,7 +17,6 @@ package manager import ( "context" "errors" - "log" "reflect" "strings" "sync" @@ -29,6 +28,7 @@ import ( blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -37,21 +37,21 @@ type GRPCService struct { *grpc.Service raftServer *RaftServer - logger *log.Logger + logger *zap.Logger - watchClusterStopCh chan struct{} - watchClusterDoneCh chan struct{} - peers map[string]interface{} - peerClients map[string]*grpc.Client - cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} - clusterMutex sync.RWMutex + updateClusterStopCh chan struct{} + updateClusterDoneCh chan struct{} + peers map[string]interface{} + peerClients map[string]*grpc.Client + cluster map[string]interface{} + clusterChans map[chan protobuf.GetClusterResponse]struct{} + clusterMutex sync.RWMutex stateChans map[chan protobuf.WatchStateResponse]struct{} stateMutex sync.RWMutex } -func NewGRPCService(raftServer *RaftServer, logger *log.Logger) (*GRPCService, error) { +func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ raftServer: raftServer, logger: logger, @@ -66,14 +66,14 @@ func NewGRPCService(raftServer *RaftServer, logger *log.Logger) (*GRPCService, e } func (s *GRPCService) Start() error { - s.logger.Print("[INFO] start update cluster") + s.logger.Info("start to update cluster info") go s.startUpdateCluster(500 * time.Millisecond) return nil } func (s *GRPCService) Stop() error { - s.logger.Print("[INFO] stop update cluster") + s.logger.Info("stop to update cluster info") s.stopUpdateCluster() return nil @@ -83,33 +83,42 @@ func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { var client *grpc.Client for id, node := range s.cluster { - state := node.(map[string]interface{})["state"].(string) - if state != raft.Shutdown.String() { + nm, ok := node.(map[string]interface{}) + 
if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } + + state, ok := nm["state"].(string) + if !ok { + s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) + continue + } - if _, exist := s.peerClients[id]; exist { - client = s.peerClients[id] - break + if state == raft.Leader.String() { + client, ok = s.peerClients[id] + if ok { + return client, nil } else { - s.logger.Printf("[DEBUG] %v does not exist", id) + s.logger.Error("node does not exist", zap.String("id", id)) } + } else { + s.logger.Debug("not a leader", zap.String("id", id)) } } - if client == nil { - return nil, errors.New("client does not exist") - } + err := errors.New("available client does not exist") + s.logger.Error(err.Error()) - return client, nil + return nil, err } func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.watchClusterStopCh = make(chan struct{}) - s.watchClusterDoneCh = make(chan struct{}) - - s.logger.Printf("[INFO] start watching a cluster") + s.updateClusterStopCh = make(chan struct{}) + s.updateClusterDoneCh = make(chan struct{}) defer func() { - close(s.watchClusterDoneCh) + close(s.updateClusterDoneCh) }() ticker := time.NewTicker(checkInterval) @@ -117,14 +126,14 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { for { select { - case <-s.watchClusterStopCh: - s.logger.Print("[DEBUG] receive request that stop watching a cluster") + case <-s.updateClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") return case <-ticker.C: // get servers servers, err := s.raftServer.GetServers() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } @@ -139,77 +148,105 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { if !reflect.DeepEqual(s.peers, peers) { // open clients for id, metadata := range peers { - grpcAddr := metadata.(map[string]interface{})["grpc_addr"].(string) + mm, ok := metadata.(map[string]interface{}) + if !ok { + s.logger.Warn("assertion failed", zap.String("id", id)) + continue + } + + grpcAddr, ok := mm["grpc_addr"].(string) + if !ok { + s.logger.Warn("missing metadata", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + continue + } + + client, exist := s.peerClients[id] + if exist { + s.logger.Debug("client has already exist in peer list", zap.String("id", id)) - if _, clientExists := s.peerClients[id]; clientExists { - client := s.peerClients[id] if client.GetAddress() != grpcAddr { - s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) + s.logger.Debug("gRPC address has been changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + + delete(s.peerClients, id) + err = client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id)) } - s.logger.Printf("[DEBUG] create client for %s", grpcAddr) newClient, err := grpc.NewClient(grpcAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) } - if client != nil { - s.logger.Printf("[DEBUG] create client for %s", newClient.GetAddress()) + if newClient != nil { s.peerClients[id] = newClient } + } else { + s.logger.Debug("gRPC address has not changed", zap.String("id", id), zap.String("client_grpc_addr", client.GetAddress()), 
zap.String("grpc_addr", grpcAddr)) } } else { - s.logger.Printf("[DEBUG] create client for %s", grpcAddr) + s.logger.Debug("client does not exist in peer list", zap.String("id", id)) + + s.logger.Debug("create gRPC client", zap.String("id", id), zap.String("grpc_addr", grpcAddr)) newClient, err := grpc.NewClient(grpcAddr) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", grpcAddr)) + } + if newClient != nil { + s.peerClients[id] = newClient } - - s.peerClients[id] = newClient } } // close nonexistent clients - for id := range s.peers { - if _, peerExists := peers[id]; !peerExists { - if _, clientExists := s.peerClients[id]; clientExists { - client := s.peerClients[id] - - s.logger.Printf("[DEBUG] close client for %s", client.GetAddress()) - err = client.Close() - if err != nil { - s.logger.Printf("[ERR] %v", err) - } + for id, client := range s.peerClients { + if metadata, exist := peers[id]; !exist { + s.logger.Info("this client is no longer in use", zap.String("id", id), zap.Any("metadata", metadata)) - delete(s.peerClients, id) + s.logger.Debug("close client", zap.String("id", id), zap.String("address", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("address", client.GetAddress())) } + + s.logger.Debug("delete client", zap.String("id", id)) + delete(s.peerClients, id) } } // keep current peer nodes s.peers = peers + s.logger.Debug("peers", zap.Any("peers", s.peers)) } // get cluster + cluster := make(map[string]interface{}, 0) ctx, _ := grpc.NewContext() resp, err := s.GetCluster(ctx, &empty.Empty{}) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } clusterIntr, err := protobuf.MarshalAny(resp.Cluster) - cluster := *clusterIntr.(*map[string]interface{}) + if err != nil { + s.logger.Error(err.Error()) + } + if clusterIntr == nil { + s.logger.Error("unexpected value") + } + cluster = *clusterIntr.(*map[string]interface{}) // notify current cluster if !reflect.DeepEqual(s.cluster, cluster) { for c := range s.clusterChans { + s.logger.Debug("notify cluster changes to client", zap.Any("response", resp)) c <- *resp } // keep current cluster s.cluster = cluster + s.logger.Debug("cluster", zap.Any("cluster", cluster)) } default: time.Sleep(100 * time.Millisecond) @@ -218,36 +255,40 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { } func (s *GRPCService) stopUpdateCluster() { - // close clients - s.logger.Printf("[INFO] close peer clients") - for _, client := range s.peerClients { - s.logger.Printf("[DEBUG] close peer client for %s", client.GetAddress()) + s.logger.Info("close all peer clients") + for id, client := range s.peerClients { + s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } - // stop watching a cluster - if s.watchClusterStopCh != nil { - s.logger.Printf("[INFO] stop watching a cluster") - close(s.watchClusterStopCh) + if s.updateClusterStopCh != nil { + s.logger.Info("send a request to stop updating a cluster") + close(s.updateClusterStopCh) } - // wait for stop watching a cluster has done - s.logger.Printf("[INFO] wait for stop watching a cluster has done") - <-s.watchClusterDoneCh + s.logger.Info("wait for the cluster update to stop") + <-s.updateClusterDoneCh + s.logger.Info("the cluster update has been 
stopped") } func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - metadata, err := s.raftServer.GetMetadata(s.raftServer.id) - if err != nil { - s.logger.Printf("[ERR] %v", err) - } + var node map[string]interface{} - node := map[string]interface{}{ - "metadata": metadata, - "state": s.raftServer.State(), + metadata, err := s.raftServer.GetMetadata(s.raftServer.id) + if err == nil { + node = map[string]interface{}{ + "metadata": metadata, + "state": s.raftServer.State(), + } + } else { + s.logger.Error(err.Error()) + node = map[string]interface{}{ + "metadata": map[string]interface{}{}, + "state": raft.Shutdown.String(), + } } return node, nil @@ -260,17 +301,17 @@ func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { if client, exist := s.peerClients[id]; exist { node, err = client.GetNode(id) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) node = map[string]interface{}{ "metadata": map[string]interface{}{}, "state": raft.Shutdown.String(), } } } else { - s.logger.Printf("[ERR] %v does not exist", id) + s.logger.Error("node does not exist in peer list", zap.String("id", id)) node = map[string]interface{}{ "metadata": map[string]interface{}{}, - "state": "Gone", + "state": raft.Shutdown.String(), } } @@ -288,26 +329,25 @@ func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) node, err = s.getPeerNode(req.Id) } if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadataAny := &any.Any{} - state := "Gone" - if node != nil { - if _, exist := node["metadata"]; exist { - if node["metadata"] != nil { - err = protobuf.UnmarshalAny(node["metadata"].(map[string]interface{}), metadataAny) - if err != nil { - return resp, status.Error(codes.Internal, err.Error()) - } - } + if metadata, exist := node["metadata"]; exist { + err = protobuf.UnmarshalAny(metadata.(map[string]interface{}), metadataAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } + } else { + s.logger.Error("missing metadata", zap.Any("metadata", metadata)) + } - if _, exist := node["state"]; exist { - if node["state"] != nil { - state = node["state"].(string) - } - } + state, exist := node["state"].(string) + if !exist { + s.logger.Error("missing node state", zap.String("state", state)) + state = raft.Shutdown.String() } resp.Metadata = metadataAny @@ -321,6 +361,7 @@ func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) ins, err := protobuf.MarshalAny(req.Metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -329,16 +370,19 @@ func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) if s.raftServer.IsLeader() { err = s.raftServer.SetMetadata(req.Id, metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.SetNode(req.Id, metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -352,16 +396,19 @@ func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRe if s.raftServer.IsLeader() { err := s.raftServer.DeleteMetadata(req.Id) if err != nil { + s.logger.Error(err.Error()) return resp, 
status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.DeleteNode(req.Id) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -374,6 +421,7 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob servers, err := s.raftServer.GetServers() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -381,11 +429,13 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob for id := range servers { nodeResp, err := s.GetNode(ctx, &protobuf.GetNodeRequest{Id: id}) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadataIntr, err := protobuf.MarshalAny(nodeResp.Metadata) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } metadata := *metadataIntr.(*map[string]interface{}) @@ -401,6 +451,7 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob clusterAny := &any.Any{} err = protobuf.UnmarshalAny(cluster, clusterAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -426,6 +477,7 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch for resp := range chans { err := server.Send(&resp) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } @@ -443,6 +495,7 @@ func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Em err := s.raftServer.Snapshot() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -459,6 +512,7 @@ func (s *GRPCService) GetState(ctx context.Context, req *protobuf.GetStateReques value, err := s.raftServer.GetState(req.Key) if err != nil { + s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: return resp, status.Error(codes.NotFound, err.Error()) @@ -470,6 +524,7 @@ func (s *GRPCService) GetState(ctx context.Context, req *protobuf.GetStateReques valueAny := &any.Any{} err = protobuf.UnmarshalAny(value, valueAny) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } @@ -488,12 +543,14 @@ func (s *GRPCService) SetState(ctx context.Context, req *protobuf.SetStateReques value, err := protobuf.MarshalAny(req.Value) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } if s.raftServer.IsLeader() { err = s.raftServer.SetState(req.Key, value) if err != nil { + s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: return resp, status.Error(codes.NotFound, err.Error()) @@ -505,10 +562,12 @@ func (s *GRPCService) SetState(ctx context.Context, req *protobuf.SetStateReques // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.SetState(req.Key, value) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -531,13 +590,12 @@ func (s *GRPCService) DeleteState(ctx context.Context, req *protobuf.DeleteState s.stateMutex.Unlock() }() - s.logger.Printf("[INFO] set %v", req) - resp := &empty.Empty{} if s.raftServer.IsLeader() { err := 
s.raftServer.DeleteState(req.Key) if err != nil { + s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: return resp, status.Error(codes.NotFound, err.Error()) @@ -549,10 +607,12 @@ func (s *GRPCService) DeleteState(ctx context.Context, req *protobuf.DeleteState // forward to leader client, err := s.getLeaderClient() if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } err = client.DeleteState(req.Key) if err != nil { + s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } } @@ -588,6 +648,7 @@ func (s *GRPCService) WatchState(req *protobuf.WatchStateRequest, server protobu } err := server.Send(&resp) if err != nil { + s.logger.Error(err.Error()) return status.Error(codes.Internal, err.Error()) } } diff --git a/manager/http_router.go b/manager/http_router.go index 04c0caf..3b61e1b 100644 --- a/manager/http_router.go +++ b/manager/http_router.go @@ -17,8 +17,8 @@ package manager import ( "encoding/json" "io/ioutil" - "log" "net/http" + "time" "github.com/gorilla/mux" blasterrors "github.com/mosuka/blast/errors" @@ -26,9 +26,10 @@ import ( blasthttp "github.com/mosuka/blast/http" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { +func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { router, err := blasthttp.NewRouter(grpcAddr, logger) if err != nil { return nil, err @@ -49,23 +50,21 @@ func NewRouter(grpcAddr string, logger *log.Logger) (*blasthttp.Router, error) { } type RootHandler struct { - logger *log.Logger + logger *zap.Logger } -func NewRootHandler(logger *log.Logger) *RootHandler { +func NewRootHandler(logger *zap.Logger) *RootHandler { return &RootHandler{ logger: logger, } } func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() + start := time.Now() status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, status, h.logger) - //blasthttp.RecordMetrics(start, status, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) msgMap := map[string]interface{}{ "version": version.Version, @@ -74,16 +73,18 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { content, err := blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + + blasthttp.WriteResponse(w, content, status, h.logger) } type GetHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewGetHandler(client *grpc.Client, logger *log.Logger) *GetHandler { +func NewGetHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, @@ -91,13 +92,11 @@ func NewGetHandler(client *grpc.Client, logger *log.Logger) *GetHandler { } func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) vars := mux.Vars(r) @@ -107,49 +106,53 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { switch err { case 
blasterrors.ErrNotFound: - httpStatus = http.StatusNotFound + status = http.StatusNotFound default: - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError } msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } // interface{} -> []byte content, err = json.MarshalIndent(value, "", " ") if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type PutHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewPutHandler(client *grpc.Client, logger *log.Logger) *PutHandler { +func NewPutHandler(client *grpc.Client, logger *zap.Logger) *PutHandler { return &PutHandler{ client: client, logger: logger, @@ -157,13 +160,11 @@ func NewPutHandler(client *grpc.Client, logger *log.Logger) *PutHandler { } func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) vars := mux.Vars(r) @@ -171,18 +172,19 @@ func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } @@ -190,45 +192,49 @@ func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var value interface{} err = json.Unmarshal(bodyBytes, &value) if err != nil { - httpStatus = http.StatusBadRequest + status = http.StatusBadRequest msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } err = h.client.SetState(key, value) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } type DeleteHandler struct { client *grpc.Client - logger *log.Logger + logger *zap.Logger } -func NewDeleteHandler(client 
*grpc.Client, logger *log.Logger) *DeleteHandler { +func NewDeleteHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, @@ -236,13 +242,11 @@ func NewDeleteHandler(client *grpc.Client, logger *log.Logger) *DeleteHandler { } func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - //start := time.Now() - httpStatus := http.StatusOK + start := time.Now() + status := http.StatusOK content := make([]byte, 0) - defer func() { - blasthttp.WriteResponse(w, content, httpStatus, h.logger) - //blasthttp.RecordMetrics(start, httpStatus, w, r, h.logger) - }() + + defer blasthttp.RecordMetrics(start, status, w, r) vars := mux.Vars(r) @@ -250,18 +254,21 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err := h.client.DeleteState(key) if err != nil { - httpStatus = http.StatusInternalServerError + status = http.StatusInternalServerError msgMap := map[string]interface{}{ "message": err.Error(), - "status": httpStatus, + "status": status, } content, err = blasthttp.NewJSONMessage(msgMap) if err != nil { - h.logger.Printf("[ERR] %v", err) + h.logger.Error(err.Error()) } + blasthttp.WriteResponse(w, content, status, h.logger) return } + + blasthttp.WriteResponse(w, content, status, h.logger) } diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 4110348..8febfbb 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -19,24 +19,23 @@ import ( "errors" "io" "io/ioutil" - "log" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/maputils" + "go.uber.org/zap" ) type RaftFSM struct { metadata maputils.Map path string - data maputils.Map - logger *log.Logger + logger *zap.Logger } -func NewRaftFSM(path string, logger *log.Logger) (*RaftFSM, error) { +func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { return &RaftFSM{ path: path, logger: logger, @@ -44,8 +43,10 @@ func NewRaftFSM(path string, logger *log.Logger) (*RaftFSM, error) { } func (f *RaftFSM) Start() error { - f.logger.Print("[INFO] initialize data") + f.logger.Info("initialize metadata") f.metadata = maputils.Map{} + + f.logger.Info("initialize store data") f.data = maputils.Map{} return nil @@ -58,6 +59,7 @@ func (f *RaftFSM) Stop() error { func (f *RaftFSM) GetMetadata(id string) (map[string]interface{}, error) { value, err := f.metadata.Get(id) if err != nil { + f.logger.Error(err.Error(), zap.String("id", id)) return nil, err } @@ -67,6 +69,7 @@ func (f *RaftFSM) GetMetadata(id string) (map[string]interface{}, error) { func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) interface{} { err := f.metadata.Merge(id, value) if err != nil { + f.logger.Error(err.Error(), zap.String("id", id), zap.Any("value", value)) return err } @@ -76,6 +79,7 @@ func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) inte func (f *RaftFSM) applyDeleteMetadata(id string) interface{} { err := f.metadata.Delete(id) if err != nil { + f.logger.Error(err.Error(), zap.String("id", id)) return err } @@ -87,8 +91,10 @@ func (f *RaftFSM) Get(key string) (interface{}, error) { if err != nil { switch err { case maputils.ErrNotFound: + f.logger.Debug("key not found in the store data", zap.String("key", key)) return nil, blasterrors.ErrNotFound default: + f.logger.Error(err.Error(), zap.String("key", key)) return nil, err } } @@ -108,11 +114,13 @@ func (f *RaftFSM) applySet(key string, value interface{}, merge bool) interface{ if merge { err := 
f.data.Merge(key, value) if err != nil { + f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) return err } } else { err := f.data.Set(key, value) if err != nil { + f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) return err } } @@ -151,10 +159,14 @@ func (f *RaftFSM) delete(keys []string, data interface{}) (interface{}, error) { func (f *RaftFSM) applyDelete(key string) interface{} { err := f.data.Delete(key) if err != nil { - if err == maputils.ErrNotFound { + switch err { + case maputils.ErrNotFound: + f.logger.Debug("key not found in the store data", zap.String("key", key)) return blasterrors.ErrNotFound + default: + f.logger.Error(err.Error(), zap.String("key", key)) + return err } - return err } return nil @@ -164,6 +176,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var msg message err := json.Unmarshal(l.Data, &msg) if err != nil { + f.logger.Error(err.Error()) return err } @@ -172,6 +185,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) @@ -179,6 +193,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } return f.applyDeleteMetadata(data["id"].(string)) @@ -186,24 +201,28 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } - return f.applySet(data["key"].(string), data["value"], true) case deleteKeyValue: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { + f.logger.Error(err.Error()) return err } - return f.applyDelete(data["key"].(string)) default: - return errors.New("command type not support") + err = errors.New("command type not supported") + f.logger.Error(err.Error()) + return err } } func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { + f.logger.Info("snapshot") + return &RaftFSMSnapshot{ data: f.data, logger: f.logger, @@ -211,24 +230,24 @@ func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { } func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Print("[INFO] restore data") + f.logger.Info("restore") defer func() { err := rc.Close() if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) } }() data, err := ioutil.ReadAll(rc) if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) return err } err = json.Unmarshal(data, &f.data) if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) return err } @@ -237,26 +256,28 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { type RaftFSMSnapshot struct { data maputils.Map - logger *log.Logger + logger *zap.Logger } func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Printf("[INFO] persist data") + f.logger.Info("persist") defer func() { err := sink.Close() if err != nil { - f.logger.Printf("[ERR] %v", err) + f.logger.Error(err.Error()) } }() buff, err := json.Marshal(f.data) if err != nil { + f.logger.Error(err.Error()) return err } _, err = sink.Write(buff) if err != nil { + f.logger.Error(err.Error()) return err } @@ -264,5 +285,5 @@ func (f *RaftFSMSnapshot) Persist(sink 
raft.SnapshotSink) error { } func (f *RaftFSMSnapshot) Release() { - f.logger.Printf("[INFO] release") + f.logger.Info("release") } diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index f6ee819..983d0a2 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -16,10 +16,11 @@ package manager import ( "io/ioutil" - "log" "os" "reflect" "testing" + + "github.com/mosuka/blast/logutils" ) func TestRaftFSM_GetNode(t *testing.T) { @@ -34,7 +35,7 @@ func TestRaftFSM_GetNode(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { @@ -96,7 +97,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { @@ -178,7 +179,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { @@ -250,7 +251,7 @@ func TestRaftFSM_Get(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { @@ -293,7 +294,7 @@ func TestRaftFSM_Set(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { @@ -432,7 +433,7 @@ func TestRaftFSM_Delete(t *testing.T) { } }() - logger := log.New(os.Stderr, "", 0) + logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) fsm, err := NewRaftFSM(tmp, logger) if err != nil { diff --git a/manager/raft_server.go b/manager/raft_server.go index 75037e0..6babeed 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -16,7 +16,8 @@ package manager import ( "encoding/json" - "log" + "errors" + "io/ioutil" "net" "path/filepath" "sync" @@ -25,7 +26,8 @@ import ( "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" _ "github.com/mosuka/blast/config" - "github.com/mosuka/blast/errors" + blasterrors "github.com/mosuka/blast/errors" + "go.uber.org/zap" ) type RaftServer struct { @@ -39,11 +41,11 @@ type RaftServer struct { indexConfig map[string]interface{} - logger *log.Logger + logger *zap.Logger mu sync.RWMutex } -func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, indexConfig map[string]interface{}, logger *log.Logger) (*RaftServer, error) { +func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, indexConfig map[string]interface{}, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ id: id, metadata: metadata, @@ -58,56 +60,78 @@ func NewRaftServer(id string, metadata map[string]interface{}, bootstrap bool, i func (s *RaftServer) Start() error { var err error - s.logger.Print("[INFO] create finite state machine") - s.fsm, err = NewRaftFSM(filepath.Join(s.metadata["data_dir"].(string), "store"), s.logger) + dataDir, ok := s.metadata["data_dir"].(string) + if !ok { + s.logger.Fatal("missing metadata", zap.String("data_dir", dataDir)) + return errors.New("missing metadata") + } + + bindAddr, ok := s.metadata["bind_addr"].(string) + if !ok { + s.logger.Fatal("missing metadata", zap.String("bind_addr", bindAddr)) + return errors.New("missing metadata") + } + + fsmPath := filepath.Join(dataDir, "store") + s.logger.Info("create finite state machine", 
zap.String("path", fsmPath)) + s.fsm, err = NewRaftFSM(fsmPath, s.logger) if err != nil { + s.logger.Fatal(err.Error()) return err } - s.logger.Print("[INFO] start finite state machine") + s.logger.Info("start finite state machine") err = s.fsm.Start() if err != nil { + s.logger.Fatal(err.Error()) return err } - s.logger.Print("[INFO] initialize Raft") + s.logger.Info("create Raft config", zap.String("id", s.id)) config := raft.DefaultConfig() config.LocalID = raft.ServerID(s.id) config.SnapshotThreshold = 1024 - config.Logger = s.logger + config.LogOutput = ioutil.Discard - addr, err := net.ResolveTCPAddr("tcp", s.metadata["bind_addr"].(string)) + s.logger.Info("resolve TCP address", zap.String("address", bindAddr)) + addr, err := net.ResolveTCPAddr("tcp", bindAddr) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create transport - transport, err := raft.NewTCPTransportWithLogger(s.metadata["bind_addr"].(string), addr, 3, 10*time.Second, s.logger) + s.logger.Info("create TCP transport", zap.String("bind_addr", bindAddr)) + transport, err := raft.NewTCPTransport(bindAddr, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create snapshot store - snapshotStore, err := raft.NewFileSnapshotStoreWithLogger(s.metadata["data_dir"].(string), 2, s.logger) + snapshotPath := filepath.Join(dataDir, "snapshots") + s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) + snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create raft log store - raftLogStore, err := raftboltdb.NewBoltStore(filepath.Join(s.metadata["data_dir"].(string), "raft.db")) + logStore := filepath.Join(dataDir, "raft.db") + s.logger.Info("create Raft log store", zap.String("path", logStore)) + raftLogStore, err := raftboltdb.NewBoltStore(logStore) if err != nil { + s.logger.Fatal(err.Error()) return err } - // create raft - s.logger.Print("[INFO] start Raft") + s.logger.Info("create Raft machine") s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftLogStore, snapshotStore, transport) if err != nil { + s.logger.Fatal(err.Error()) return err } if s.bootstrap { - s.logger.Print("[INFO] configure Raft as bootstrap") + s.logger.Info("configure Raft machine as bootstrap") configuration := raft.Configuration{ Servers: []raft.Server{ { @@ -118,32 +142,27 @@ func (s *RaftServer) Start() error { } s.raft.BootstrapCluster(configuration) - // wait for become a leader - s.logger.Print("[INFO] wait for become a leader") + s.logger.Info("wait for become a leader") err = s.WaitForDetectLeader(60 * time.Second) if err != nil { - if err == errors.ErrTimeout { - s.logger.Printf("[WARN] %v", err) - } else { - s.logger.Printf("[ERR] %v", err) - return nil - } + s.logger.Fatal(err.Error()) + return err } // set metadata - s.logger.Print("[INFO] register itself in a cluster") + s.logger.Info("register its own information", zap.String("id", s.id), zap.Any("metadata", s.metadata)) err = s.setMetadata(s.id, s.metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Fatal(err.Error()) + return err } // set index config - s.logger.Print("[INFO] register index config") + s.logger.Info("register index config") err = s.setIndexConfig(s.indexConfig) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Fatal(err.Error()) + return err } } @@ -151,16 +170,18 @@ func (s *RaftServer) Start() error { } func (s *RaftServer) Stop() 
error { - s.logger.Print("[INFO] shutdown Raft") + s.logger.Info("shutdown Raft machine") f := s.raft.Shutdown() err := f.Error() if err != nil { + s.logger.Error(err.Error()) return err } - s.logger.Print("[INFO] stop finite state machine") + s.logger.Info("stop finite state machine") err = s.fsm.Stop() if err != nil { + s.logger.Error(err.Error()) return err } @@ -170,6 +191,7 @@ func (s *RaftServer) Stop() error { func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() + timer := time.NewTimer(timeout) defer timer.Stop() @@ -178,10 +200,12 @@ func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, e case <-ticker.C: leaderAddr := s.raft.Leader() if leaderAddr != "" { + s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) return leaderAddr, nil } case <-timer.C: - return "", errors.ErrTimeout + s.logger.Error("timeout exceeded") + return "", blasterrors.ErrTimeout } } } @@ -189,12 +213,14 @@ func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, e func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { leaderAddr, err := s.LeaderAddress(timeout) if err != nil { + s.logger.Error(err.Error()) return "", err } cf := s.raft.GetConfiguration() err = cf.Error() if err != nil { + s.logger.Error(err.Error()) return "", err } @@ -204,7 +230,8 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } } - return "", errors.ErrNotFoundLeader + s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) + return "", blasterrors.ErrNotFoundLeader } func (s *RaftServer) Stats() map[string]string { @@ -222,6 +249,7 @@ func (s *RaftServer) IsLeader() bool { func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { _, err := s.LeaderAddress(timeout) if err != nil { + s.logger.Error(err.Error()) return err } @@ -231,6 +259,7 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { func (s *RaftServer) getMetadata(id string) (map[string]interface{}, error) { metadata, err := s.fsm.GetMetadata(id) if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -246,17 +275,20 @@ func (s *RaftServer) setMetadata(id string, metadata map[string]interface{}) err }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -271,17 +303,20 @@ func (s *RaftServer) deleteMetadata(id string) error { }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -291,6 +326,7 @@ func (s *RaftServer) deleteMetadata(id string) error { func (s *RaftServer) setIndexConfig(indexConfig map[string]interface{}) error { err := s.SetState("index_config", indexConfig) if err != nil { + s.logger.Error(err.Error()) return err } @@ -301,6 +337,7 @@ func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -309,6 +346,7 @@ func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { if server.ID == 
raft.ServerID(id) { metadata, err = s.getMetadata(id) if err != nil { + s.logger.Error(err.Error()) return nil, err } break @@ -320,71 +358,80 @@ func (s *RaftServer) GetMetadata(id string) (map[string]interface{}, error) { func (s *RaftServer) SetMetadata(id string, metadata map[string]interface{}) error { if !s.IsLeader() { + s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return err } for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - s.logger.Printf("[INFO] node %v already joined the cluster", id) + s.logger.Info("node already joined the cluster", zap.String("id", id)) return nil } } - f := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(metadata["bind_addr"].(string)), 0, 0) + bindAddr, ok := metadata["bind_addr"].(string) + if !ok { + s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) + return errors.New("missing metadata") + } + + s.logger.Info("add voter", zap.String("id", id), zap.String("address", bindAddr)) + f := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(bindAddr), 0, 0) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } // set metadata err = s.setMetadata(id, metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Error(err.Error()) + return err } - s.logger.Printf("[INFO] node %v joined successfully", id) return nil } func (s *RaftServer) DeleteMetadata(id string) error { if !s.IsLeader() { + s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return err } for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { + s.logger.Debug("remove server", zap.String("id", id)) f := s.raft.RemoveServer(server.ID, 0, 0) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } - - s.logger.Printf("[INFO] node %v leaved successfully", id) - return nil } } // delete metadata err = s.deleteMetadata(id) if err != nil { - s.logger.Printf("[ERR] %v", err) - return nil + s.logger.Error(err.Error()) + return err } - s.logger.Printf("[INFO] node %v does not exists in the cluster", id) return nil } @@ -392,6 +439,7 @@ func (s *RaftServer) GetServers() (map[string]interface{}, error) { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -399,10 +447,8 @@ func (s *RaftServer) GetServers() (map[string]interface{}, error) { for _, server := range cf.Configuration().Servers { metadata, err := s.GetMetadata(string(server.ID)) if err != nil { - // could not get metadata - continue + s.logger.Warn(err.Error()) } - servers[string(server.ID)] = metadata } @@ -413,6 +459,7 @@ func (s *RaftServer) Snapshot() error { f := s.raft.Snapshot() err := f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -422,6 +469,7 @@ func (s *RaftServer) Snapshot() error { func (s *RaftServer) GetState(key string) (interface{}, error) { value, err := s.fsm.Get(key) if err != nil { + s.logger.Error(err.Error()) return nil, err } @@ -430,6 +478,7 @@ func (s *RaftServer) GetState(key string) (interface{}, error) { func (s *RaftServer) SetState(key string, value interface{}) error { if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), 
zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } @@ -441,17 +490,20 @@ func (s *RaftServer) SetState(key string, value interface{}) error { }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } @@ -460,6 +512,7 @@ func (s *RaftServer) SetState(key string, value interface{}) error { func (s *RaftServer) DeleteState(key string) error { if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } @@ -470,17 +523,20 @@ func (s *RaftServer) DeleteState(key string) error { }, ) if err != nil { + s.logger.Error(err.Error()) return err } msgBytes, err := json.Marshal(msg) if err != nil { + s.logger.Error(err.Error()) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { + s.logger.Error(err.Error()) return err } diff --git a/manager/server.go b/manager/server.go index ef979bf..938e947 100644 --- a/manager/server.go +++ b/manager/server.go @@ -15,11 +15,10 @@ package manager import ( - "log" - accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/http" + "go.uber.org/zap" ) type Server struct { @@ -36,11 +35,11 @@ type Server struct { httpRouter *http.Router httpServer *http.Server - logger *log.Logger + logger *zap.Logger httpLogger accesslog.Logger } -func NewServer(id string, metadata map[string]interface{}, peerAddr string, indexConfig map[string]interface{}, logger *log.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(id string, metadata map[string]interface{}, peerAddr string, indexConfig map[string]interface{}, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ id: id, metadata: metadata, @@ -56,73 +55,73 @@ func (s *Server) Start() { // bootstrap node? 
bootstrap := s.peerAddr == "" - s.logger.Printf("[INFO] bootstrap: %v", bootstrap) + s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) // create raft server s.raftServer, err = NewRaftServer(s.id, s.metadata, bootstrap, s.indexConfig, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create gRPC service s.grpcService, err = NewGRPCService(s.raftServer, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create gRPC server s.grpcServer, err = grpc.NewServer(s.metadata["grpc_addr"].(string), s.grpcService, s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create HTTP router s.httpRouter, err = NewRouter(s.metadata["grpc_addr"].(string), s.logger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // create HTTP server s.httpServer, err = http.NewServer(s.metadata["http_addr"].(string), s.httpRouter, s.logger, s.httpLogger) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) return } // start Raft server - s.logger.Print("[INFO] start Raft server") + s.logger.Info("start Raft server") err = s.raftServer.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } // start gRPC service - s.logger.Print("[INFO] start gRPC service") + s.logger.Info("start gRPC service") go func() { err := s.grpcService.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start gRPC server - s.logger.Print("[INFO] start gRPC server") + s.logger.Info("start gRPC server") go func() { err := s.grpcServer.Start() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } }() // start HTTP server - s.logger.Print("[INFO] start HTTP server") + s.logger.Info("start HTTP server") go func() { _ = s.httpServer.Start() }() @@ -133,54 +132,49 @@ func (s *Server) Start() { defer func() { err := client.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } }() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } err = client.SetNode(s.id, s.metadata) if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Fatal(err.Error()) return } } } func (s *Server) Stop() { - // stop HTTP server - s.logger.Print("[INFO] stop HTTP server") + s.logger.Info("stop HTTP server") err := s.httpServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop HTTP router err = s.httpRouter.Close() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC server - s.logger.Print("[INFO] stop gRPC server") + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop gRPC service - s.logger.Print("[INFO] stop gRPC service") + s.logger.Info("stop gRPC service") err = s.grpcService.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } - // stop Raft server - s.logger.Print("[INFO] stop Raft server") + s.logger.Info("stop Raft server") err = s.raftServer.Stop() if err != nil { - s.logger.Printf("[ERR] %v", err) + s.logger.Error(err.Error()) } } From ada45977ad87451e1405a31fe84de05f13d092a3 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 1 Jul 2019 00:45:44 +0900 Subject: [PATCH 3/9] Add test --- Makefile | 1 - 
indexer/raft_fsm.go | 32 +++-- indexer/raft_server.go | 20 +++ manager/raft_fsm.go | 41 ++++-- manager/raft_server.go | 20 +++ manager/server_test.go | 300 +++++++++++++++++++++++++++++++++++++++++ testutils/testutils.go | 33 +++++ 7 files changed, 421 insertions(+), 26 deletions(-) create mode 100644 manager/server_test.go create mode 100644 testutils/testutils.go diff --git a/Makefile b/Makefile index 0b5d1d8..484515f 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,6 @@ PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) -# TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/ | grep blast-manager) ifeq ($(VERSION),) VERSION = latest diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 0bc91b5..15fd3a4 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -87,7 +87,7 @@ func (f *RaftFSM) GetMetadata(id string) (map[string]interface{}, error) { return value.(maputils.Map).ToMap(), nil } -func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) interface{} { +func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) error { f.metadataMutex.RLock() defer f.metadataMutex.RUnlock() @@ -125,7 +125,7 @@ func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (f *RaftFSM) applyIndexDocument(id string, fields map[string]interface{}) interface{} { +func (f *RaftFSM) applyIndexDocument(id string, fields map[string]interface{}) error { f.logger.Debug("apply to index a document", zap.String("id", id), zap.Any("fields", fields)) err := f.index.Index(id, fields) @@ -137,7 +137,7 @@ func (f *RaftFSM) applyIndexDocument(id string, fields map[string]interface{}) i return nil } -func (f *RaftFSM) applyDeleteDocument(id string) interface{} { +func (f *RaftFSM) applyDeleteDocument(id string) error { f.logger.Debug("apply to delete a document", zap.String("id", id)) err := f.index.Delete(id) @@ -161,6 +161,10 @@ func (f *RaftFSM) Search(request *bleve.SearchRequest) (*bleve.SearchResult, err return result, nil } +type fsmResponse struct { + error error +} + func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Debug("apply a message") @@ -176,9 +180,11 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) + + err = f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) + return &fsmResponse{error: err} case deleteNode: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) @@ -192,21 +198,25 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applyIndexDocument(data["id"].(string), data["fields"].(map[string]interface{})) + + err = f.applyIndexDocument(data["id"].(string), data["fields"].(map[string]interface{})) + return &fsmResponse{error: err} case deleteDocument: var data string err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - 
return f.applyDeleteDocument(data) + + err = f.applyDeleteDocument(data) + return &fsmResponse{error: err} default: - err = errors.New("command type not support") + err = errors.New("unsupported command") f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } } diff --git a/indexer/raft_server.go b/indexer/raft_server.go index ae82931..3224254 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -283,6 +283,11 @@ func (s *RaftServer) setMetadata(id string, metadata map[string]interface{}) err s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } @@ -313,6 +318,11 @@ func (s *RaftServer) deleteMetadata(id string) error { s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } @@ -505,6 +515,11 @@ func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { s.logger.Error(err.Error()) return -1, err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return -1, err + } count++ } @@ -541,6 +556,11 @@ func (s *RaftServer) DeleteDocument(ids []string) (int, error) { s.logger.Error(err.Error()) return -1, err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return -1, err + } count++ } diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 8febfbb..7357797 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -66,7 +66,7 @@ func (f *RaftFSM) GetMetadata(id string) (map[string]interface{}, error) { return value.(maputils.Map).ToMap(), nil } -func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) interface{} { +func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) error { err := f.metadata.Merge(id, value) if err != nil { f.logger.Error(err.Error(), zap.String("id", id), zap.Any("value", value)) @@ -76,7 +76,7 @@ func (f *RaftFSM) applySetMetadata(id string, value map[string]interface{}) inte return nil } -func (f *RaftFSM) applyDeleteMetadata(id string) interface{} { +func (f *RaftFSM) applyDeleteMetadata(id string) error { err := f.metadata.Delete(id) if err != nil { f.logger.Error(err.Error(), zap.String("id", id)) @@ -110,7 +110,7 @@ func (f *RaftFSM) Get(key string) (interface{}, error) { return ret, nil } -func (f *RaftFSM) applySet(key string, value interface{}, merge bool) interface{} { +func (f *RaftFSM) applySet(key string, value interface{}, merge bool) error { if merge { err := f.data.Merge(key, value) if err != nil { @@ -156,7 +156,8 @@ func (f *RaftFSM) delete(keys []string, data interface{}) (interface{}, error) { return data, nil } -func (f *RaftFSM) applyDelete(key string) interface{} { +//func (f *RaftFSM) applyDelete(key string) interface{} { +func (f *RaftFSM) applyDelete(key string) error { err := f.data.Delete(key) if err != nil { switch err { @@ -172,6 +173,10 @@ func (f *RaftFSM) applyDelete(key string) interface{} { return nil } +type fsmResponse struct { + error error +} + func (f *RaftFSM) Apply(l *raft.Log) interface{} { var msg message err := json.Unmarshal(l.Data, &msg) @@ -186,37 +191,45 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) + 
+ err = f.applySetMetadata(data["id"].(string), data["metadata"].(map[string]interface{})) + return &fsmResponse{error: err} case deleteNode: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applyDeleteMetadata(data["id"].(string)) + + err = f.applyDeleteMetadata(data["id"].(string)) + return &fsmResponse{error: err} case setKeyValue: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applySet(data["key"].(string), data["value"], true) + + err = f.applySet(data["key"].(string), data["value"], true) + return &fsmResponse{error: err} case deleteKeyValue: var data map[string]interface{} err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } - return f.applyDelete(data["key"].(string)) + + err = f.applyDelete(data["key"].(string)) + return &fsmResponse{error: err} default: - err = errors.New("command type not support") + err = errors.New("unsupported command") f.logger.Error(err.Error()) - return err + return &fsmResponse{error: err} } } diff --git a/manager/raft_server.go b/manager/raft_server.go index 6babeed..b2bce8d 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -291,6 +291,11 @@ func (s *RaftServer) setMetadata(id string, metadata map[string]interface{}) err s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } @@ -319,6 +324,11 @@ func (s *RaftServer) deleteMetadata(id string) error { s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } @@ -506,6 +516,11 @@ func (s *RaftServer) SetState(key string, value interface{}) error { s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } @@ -539,6 +554,11 @@ func (s *RaftServer) DeleteState(key string) error { s.logger.Error(err.Error()) return err } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } return nil } diff --git a/manager/server_test.go b/manager/server_test.go new file mode 100644 index 0000000..4ca2373 --- /dev/null +++ b/manager/server_test.go @@ -0,0 +1,300 @@ +package manager + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/mosuka/blast/errors" + + "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/testutils" +) + +func TestSingleNode(t *testing.T) { + tmpDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(tmpDir) + if err != nil { + t.Errorf("%v", err) + } + }() + + curDir, _ := os.Getwd() + + logLevel := "DEBUG" + logFilename := "" + logMaxSize := 500 + logMaxBackups := 3 + logMaxAge := 30 + logCompress := false + + httpAccessLogFilename := "" + httpAccessLogMaxSize := 500 + httpAccessLogMaxBackups := 3 + httpAccessLogMaxAge := 30 + httpAccessLogCompress := false + + nodeId := "manager1" + bindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", 
err) + } + bindAddr := fmt.Sprintf(":%d", bindPort) + grpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + grpcAddr := fmt.Sprintf(":%d", grpcPort) + httpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + httpAddr := fmt.Sprintf(":%d", httpPort) + dataDir := filepath.Join(tmpDir, "store") + peerAddr := "" + + indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") + indexType := "upside_down" + indexStorageType := "boltdb" + + // create logger + logger := logutils.NewLogger( + logLevel, + logFilename, + logMaxSize, + logMaxBackups, + logMaxAge, + logCompress, + ) + + // create HTTP access logger + httpAccessLogger := logutils.NewApacheCombinedLogger( + httpAccessLogFilename, + httpAccessLogMaxSize, + httpAccessLogMaxBackups, + httpAccessLogMaxAge, + httpAccessLogCompress, + ) + + // metadata + metadata := map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + } + + // index mapping + indexMapping := mapping.NewIndexMapping() + if indexMappingFile != "" { + _, err := os.Stat(indexMappingFile) + if err == nil { + // read index mapping file + f, err := os.Open(indexMappingFile) + if err != nil { + t.Errorf("%v", err) + } + defer func() { + _ = f.Close() + }() + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Errorf("%v", err) + } + + err = json.Unmarshal(b, indexMapping) + if err != nil { + t.Errorf("%v", err) + } + } else if os.IsNotExist(err) { + t.Errorf("%v", err) + } + } + err = indexMapping.Validate() + if err != nil { + t.Errorf("%v", err) + } + + // IndexMappingImpl -> JSON + indexMappingJSON, err := json.Marshal(indexMapping) + if err != nil { + t.Errorf("%v", err) + } + // JSON -> map[string]interface{} + var indexMappingMap map[string]interface{} + err = json.Unmarshal(indexMappingJSON, &indexMappingMap) + if err != nil { + t.Errorf("%v", err) + } + + indexConfig := map[string]interface{}{ + "index_mapping": indexMappingMap, + "index_type": indexType, + "index_storage_type": indexStorageType, + } + + server, err := NewServer(nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) + defer func() { + server.Stop() + }() + if err != nil { + t.Errorf("%v", err) + } + + // start server + server.Start() + + // sleep + time.Sleep(10 * time.Second) + + // create gRPC client + client, err := grpc.NewClient(grpcAddr) + defer func() { + _ = client.Close() + }() + if err != nil { + t.Errorf("%v", err) + } + + // liveness + liveness, err := client.LivenessProbe() + if err != nil { + t.Errorf("%v", err) + } + exp1 := protobuf.LivenessProbeResponse_ALIVE.String() + act1 := liveness + if exp1 != act1 { + t.Errorf("expected content to see %v, saw %v", exp1, act1) + } + + // readiness + readiness, err := client.ReadinessProbe() + if err != nil { + t.Errorf("%v", err) + } + exp2 := protobuf.ReadinessProbeResponse_READY.String() + act2 := readiness + if exp1 != act1 { + t.Errorf("expected content to see %v, saw %v", exp2, act2) + } + + // get node + node, err := client.GetNode(nodeId) + if err != nil { + t.Errorf("%v", err) + } + exp3 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + }, + "state": "Leader", + } + act3 := node + if !reflect.DeepEqual(exp3, act3) { + t.Errorf("expected content to see %v, saw %v", exp3, act3) + } + + // get cluster + cluster, err := client.GetCluster() + if err != nil { + t.Errorf("%v", err) + } 
+ exp4 := map[string]interface{}{ + nodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + }, + "state": "Leader", + }, + } + act4 := cluster + if !reflect.DeepEqual(exp4, act4) { + t.Errorf("expected content to see %v, saw %v", exp4, act4) + } + + // get index mapping + val5, err := client.GetState("index_config/index_mapping") + if err != nil { + t.Errorf("%v", err) + } + exp5 := indexMappingMap + act5 := *val5.(*map[string]interface{}) + if !reflect.DeepEqual(exp5, act5) { + t.Errorf("expected content to see %v, saw %v", exp5, act5) + } + + // get index type + val6, err := client.GetState("index_config/index_type") + if err != nil { + t.Errorf("%v", err) + } + exp6 := indexType + act6 := *val6.(*string) + if exp6 != act6 { + t.Errorf("expected content to see %v, saw %v", exp6, act6) + } + + // get index storage type + val7, err := client.GetState("index_config/index_storage_type") + if err != nil { + t.Errorf("%v", err) + } + exp7 := indexStorageType + act7 := *val7.(*string) + if exp7 != act7 { + t.Errorf("expected content to see %v, saw %v", exp7, act7) + } + + // set value + err = client.SetState("test/key8", "val8") + if err != nil { + t.Errorf("%v", err) + } + val8, err := client.GetState("test/key8") + if err != nil { + t.Errorf("%v", err) + } + exp8 := "val8" + act8 := *val8.(*string) + if exp8 != act8 { + t.Errorf("expected content to see %v, saw %v", exp8, act8) + } + + // delete value + err = client.DeleteState("test/key8") + if err != nil { + t.Errorf("%v", err) + } + val9, err := client.GetState("test/key8") + if err != errors.ErrNotFound { + t.Errorf("%v", err) + } + if val9 != nil { + t.Errorf("%v", err) + } + + // delete non-existing data + err = client.DeleteState("test/non-existing") + if err == nil { + t.Errorf("%v", err) + } +} diff --git a/testutils/testutils.go b/testutils/testutils.go new file mode 100644 index 0000000..b5b7846 --- /dev/null +++ b/testutils/testutils.go @@ -0,0 +1,33 @@ +package testutils + +import ( + "io/ioutil" + "net" +) + +func TmpDir() (string, error) { + tmp, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + + return tmp, nil +} + +func TmpPort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + + defer func() { + _ = l.Close() + }() + + return l.Addr().(*net.TCPAddr).Port, nil +} From 41f397ea372d761c7ea367d3fd106626c2817840 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 1 Jul 2019 09:31:22 +0900 Subject: [PATCH 4/9] Add test --- grpc/client.go | 28 ++++- indexer/server.go | 17 ++- indexer/server_test.go | 267 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 297 insertions(+), 15 deletions(-) create mode 100644 indexer/server_test.go diff --git a/grpc/client.go b/grpc/client.go index 5769fd1..dcc5b04 100644 --- a/grpc/client.go +++ b/grpc/client.go @@ -401,24 +401,40 @@ func (c *Client) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, err return int(resp.Count), nil } -func (c *Client) GetIndexConfig(opts ...grpc.CallOption) (*protobuf.GetIndexConfigResponse, error) { - conf, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) +func (c *Client) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { + resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) 
if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } - return conf, nil + indexConfigIntr, err := protobuf.MarshalAny(resp.IndexConfig) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + indexConfig := *indexConfigIntr.(*map[string]interface{}) + + return indexConfig, nil } -func (c *Client) GetIndexStats(opts ...grpc.CallOption) (*protobuf.GetIndexStatsResponse, error) { - stats, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) +func (c *Client) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { + resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + + indexStatsIntr, err := protobuf.MarshalAny(resp.IndexStats) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } + indexStats := *indexStatsIntr.(*map[string]interface{}) - return stats, nil + return indexStats, nil } diff --git a/indexer/server.go b/indexer/server.go index ce345d4..2168dda 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -21,7 +21,6 @@ import ( "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/http" - "github.com/mosuka/blast/protobuf" "go.uber.org/zap" ) @@ -164,19 +163,19 @@ func (s *Server) Start() { } s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - resp, err := pc.GetIndexConfig() + s.indexConfig, err = pc.GetIndexConfig() if err != nil { s.logger.Fatal(err.Error()) return } - ins, err := protobuf.MarshalAny(resp.IndexConfig) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - s.indexConfig = *ins.(*map[string]interface{}) + //ins, err := protobuf.MarshalAny(resp.IndexConfig) + //if err != nil { + // s.logger.Fatal(err.Error()) + // return + //} + // + //s.indexConfig = *ins.(*map[string]interface{}) } var err error diff --git a/indexer/server_test.go b/indexer/server_test.go new file mode 100644 index 0000000..e2c3209 --- /dev/null +++ b/indexer/server_test.go @@ -0,0 +1,267 @@ +package indexer + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/testutils" +) + +func TestSingleNode(t *testing.T) { + tmpDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(tmpDir) + if err != nil { + t.Errorf("%v", err) + } + }() + + curDir, _ := os.Getwd() + + logLevel := "DEBUG" + logFilename := "" + logMaxSize := 500 + logMaxBackups := 3 + logMaxAge := 30 + logCompress := false + + httpAccessLogFilename := "" + httpAccessLogMaxSize := 500 + httpAccessLogMaxBackups := 3 + httpAccessLogMaxAge := 30 + httpAccessLogCompress := false + + nodeId := "indexer1" + bindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + bindAddr := fmt.Sprintf(":%d", bindPort) + grpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + grpcAddr := fmt.Sprintf(":%d", grpcPort) + httpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + httpAddr := fmt.Sprintf(":%d", httpPort) + dataDir := filepath.Join(tmpDir, "store") + peerAddr := "" + + indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") + indexType := 
"upside_down" + indexStorageType := "boltdb" + + // create logger + logger := logutils.NewLogger( + logLevel, + logFilename, + logMaxSize, + logMaxBackups, + logMaxAge, + logCompress, + ) + + // create HTTP access logger + httpAccessLogger := logutils.NewApacheCombinedLogger( + httpAccessLogFilename, + httpAccessLogMaxSize, + httpAccessLogMaxBackups, + httpAccessLogMaxAge, + httpAccessLogCompress, + ) + + // metadata + metadata := map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + } + + // index mapping + indexMapping := mapping.NewIndexMapping() + if indexMappingFile != "" { + _, err := os.Stat(indexMappingFile) + if err == nil { + // read index mapping file + f, err := os.Open(indexMappingFile) + if err != nil { + t.Errorf("%v", err) + } + defer func() { + _ = f.Close() + }() + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Errorf("%v", err) + } + + err = json.Unmarshal(b, indexMapping) + if err != nil { + t.Errorf("%v", err) + } + } else if os.IsNotExist(err) { + t.Errorf("%v", err) + } + } + err = indexMapping.Validate() + if err != nil { + t.Errorf("%v", err) + } + + // IndexMappingImpl -> JSON + indexMappingJSON, err := json.Marshal(indexMapping) + if err != nil { + t.Errorf("%v", err) + } + // JSON -> map[string]interface{} + var indexMappingMap map[string]interface{} + err = json.Unmarshal(indexMappingJSON, &indexMappingMap) + if err != nil { + t.Errorf("%v", err) + } + + indexConfig := map[string]interface{}{ + "index_mapping": indexMappingMap, + "index_type": indexType, + "index_storage_type": indexStorageType, + } + + server, err := NewServer("", "", nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) + defer func() { + server.Stop() + }() + if err != nil { + t.Errorf("%v", err) + } + + // start server + server.Start() + + // sleep + time.Sleep(10 * time.Second) + + // create gRPC client + client, err := grpc.NewClient(grpcAddr) + defer func() { + _ = client.Close() + }() + if err != nil { + t.Errorf("%v", err) + } + + // liveness + liveness, err := client.LivenessProbe() + if err != nil { + t.Errorf("%v", err) + } + exp1 := protobuf.LivenessProbeResponse_ALIVE.String() + act1 := liveness + if exp1 != act1 { + t.Errorf("expected content to see %v, saw %v", exp1, act1) + } + + // readiness + readiness, err := client.ReadinessProbe() + if err != nil { + t.Errorf("%v", err) + } + exp2 := protobuf.ReadinessProbeResponse_READY.String() + act2 := readiness + if exp1 != act1 { + t.Errorf("expected content to see %v, saw %v", exp2, act2) + } + + // get node + node, err := client.GetNode(nodeId) + if err != nil { + t.Errorf("%v", err) + } + exp3 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + }, + "state": "Leader", + } + act3 := node + if !reflect.DeepEqual(exp3, act3) { + t.Errorf("expected content to see %v, saw %v", exp3, act3) + } + + // get cluster + cluster, err := client.GetCluster() + if err != nil { + t.Errorf("%v", err) + } + exp4 := map[string]interface{}{ + nodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + }, + "state": "Leader", + }, + } + act4 := cluster + if !reflect.DeepEqual(exp4, act4) { + t.Errorf("expected content to see %v, saw %v", exp4, act4) + } + + // get index config + val5, err := client.GetIndexConfig() + if err != nil { + 
t.Errorf("%v", err) + } + exp5 := indexConfig + act5 := val5 + if !reflect.DeepEqual(exp5, act5) { + t.Errorf("expected content to see %v, saw %v", exp5, act5) + } + + // get index stats + val6, err := client.GetIndexStats() + if err != nil { + t.Errorf("%v", err) + } + exp6 := map[string]interface{}{ + "index": map[string]interface{}{ + "analysis_time": float64(0), + "batches": float64(0), + "deletes": float64(0), + "errors": float64(0), + "index_time": float64(0), + "num_plain_text_bytes_indexed": float64(0), + "term_searchers_finished": float64(0), + "term_searchers_started": float64(0), + "updates": float64(0), + }, + "search_time": float64(0), + "searches": float64(0), + } + act6 := val6 + if !reflect.DeepEqual(exp6, act6) { + t.Errorf("expected content to see %v, saw %v", exp6, act6) + } +} From ef84f3ad57b018d85225b4d967d1fd4c3b1ce1cd Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 2 Jul 2019 08:52:09 +0900 Subject: [PATCH 5/9] Add test --- cmd/blastd/indexer.go | 2 +- cmd/blastd/manager.go | 2 +- indexer/server_test.go | 16 +- manager/server_test.go | 1077 ++++++++++++++++++++++++++++++++++++---- 4 files changed, 1011 insertions(+), 86 deletions(-) diff --git a/cmd/blastd/indexer.go b/cmd/blastd/indexer.go index 6f8836f..e72dda1 100644 --- a/cmd/blastd/indexer.go +++ b/cmd/blastd/indexer.go @@ -132,7 +132,7 @@ func startIndexer(c *cli.Context) error { "index_storage_type": indexStorageType, } - svr, err := indexer.NewServer(managerAddr, clusterId, nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) + svr, err := indexer.NewServer(managerAddr, clusterId, nodeId, metadata, peerAddr, indexConfig, logger.Named(nodeId), httpAccessLogger) if err != nil { return err } diff --git a/cmd/blastd/manager.go b/cmd/blastd/manager.go index 57c9734..33dce6a 100644 --- a/cmd/blastd/manager.go +++ b/cmd/blastd/manager.go @@ -129,7 +129,7 @@ func startManager(c *cli.Context) error { "index_storage_type": indexStorageType, } - svr, err := manager.NewServer(nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) + svr, err := manager.NewServer(nodeId, metadata, peerAddr, indexConfig, logger.Named(nodeId), httpAccessLogger) if err != nil { return err } diff --git a/indexer/server_test.go b/indexer/server_test.go index e2c3209..b623ff0 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -1,3 +1,17 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package indexer import ( @@ -156,7 +170,7 @@ func TestSingleNode(t *testing.T) { server.Start() // sleep - time.Sleep(10 * time.Second) + time.Sleep(5 * time.Second) // create gRPC client client, err := grpc.NewClient(grpcAddr) diff --git a/manager/server_test.go b/manager/server_test.go index 4ca2373..38bb608 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -1,3 +1,17 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package manager import ( @@ -10,93 +24,21 @@ import ( "testing" "time" - "github.com/mosuka/blast/errors" - "github.com/blevesearch/bleve/mapping" + blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/testutils" ) -func TestSingleNode(t *testing.T) { - tmpDir, err := testutils.TmpDir() - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Errorf("%v", err) - } - }() - +func TestStandalone(t *testing.T) { curDir, _ := os.Getwd() - logLevel := "DEBUG" - logFilename := "" - logMaxSize := 500 - logMaxBackups := 3 - logMaxAge := 30 - logCompress := false - - httpAccessLogFilename := "" - httpAccessLogMaxSize := 500 - httpAccessLogMaxBackups := 3 - httpAccessLogMaxAge := 30 - httpAccessLogCompress := false - - nodeId := "manager1" - bindPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - bindAddr := fmt.Sprintf(":%d", bindPort) - grpcPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - grpcAddr := fmt.Sprintf(":%d", grpcPort) - httpPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - httpAddr := fmt.Sprintf(":%d", httpPort) - dataDir := filepath.Join(tmpDir, "store") - peerAddr := "" - + // index config indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") indexType := "upside_down" indexStorageType := "boltdb" - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, - ) - - // metadata - metadata := map[string]interface{}{ - "bind_addr": bindAddr, - "grpc_addr": grpcAddr, - "http_addr": httpAddr, - "data_dir": dataDir, - } - - // index mapping indexMapping := mapping.NewIndexMapping() if indexMappingFile != "" { _, err := os.Stat(indexMappingFile) @@ -123,30 +65,75 @@ func TestSingleNode(t *testing.T) { t.Errorf("%v", err) } } - err = indexMapping.Validate() + err := indexMapping.Validate() if err != nil { t.Errorf("%v", err) } - - // IndexMappingImpl -> JSON indexMappingJSON, err := json.Marshal(indexMapping) if err != nil { t.Errorf("%v", err) } - // JSON -> map[string]interface{} var indexMappingMap map[string]interface{} err = json.Unmarshal(indexMappingJSON, &indexMappingMap) if err != nil { t.Errorf("%v", err) } - indexConfig := map[string]interface{}{ "index_mapping": indexMappingMap, "index_type": indexType, "index_storage_type": indexStorageType, } - server, err := NewServer(nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) + // create logger + logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + + // create HTTP access logger + httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) + + // node + nodeId := "manager1" + + bindPort, err := 
testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + bindAddr := fmt.Sprintf(":%d", bindPort) + + grpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + grpcAddr := fmt.Sprintf(":%d", grpcPort) + + httpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + httpAddr := fmt.Sprintf(":%d", httpPort) + + dataDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(dataDir) + if err != nil { + t.Errorf("%v", err) + } + }() + + // peer address + peerAddr := "" + + // metadata + metadata := map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + } + + server, err := NewServer(nodeId, metadata, peerAddr, indexConfig, logger.Named(nodeId), httpAccessLogger) defer func() { server.Stop() }() @@ -158,7 +145,7 @@ func TestSingleNode(t *testing.T) { server.Start() // sleep - time.Sleep(10 * time.Second) + time.Sleep(5 * time.Second) // create gRPC client client, err := grpc.NewClient(grpcAddr) @@ -285,7 +272,7 @@ func TestSingleNode(t *testing.T) { t.Errorf("%v", err) } val9, err := client.GetState("test/key8") - if err != errors.ErrNotFound { + if err != blasterrors.ErrNotFound { t.Errorf("%v", err) } if val9 != nil { @@ -298,3 +285,927 @@ func TestSingleNode(t *testing.T) { t.Errorf("%v", err) } } + +func TestCluster(t *testing.T) { + curDir, _ := os.Getwd() + + // index config + indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") + indexType := "upside_down" + indexStorageType := "boltdb" + indexMapping := mapping.NewIndexMapping() + if indexMappingFile != "" { + _, err := os.Stat(indexMappingFile) + if err == nil { + // read index mapping file + f, err := os.Open(indexMappingFile) + if err != nil { + t.Errorf("%v", err) + } + defer func() { + _ = f.Close() + }() + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Errorf("%v", err) + } + + err = json.Unmarshal(b, indexMapping) + if err != nil { + t.Errorf("%v", err) + } + } else if os.IsNotExist(err) { + t.Errorf("%v", err) + } + } + err := indexMapping.Validate() + if err != nil { + t.Errorf("%v", err) + } + indexMappingJSON, err := json.Marshal(indexMapping) + if err != nil { + t.Errorf("%v", err) + } + var indexMappingMap map[string]interface{} + err = json.Unmarshal(indexMappingJSON, &indexMappingMap) + if err != nil { + t.Errorf("%v", err) + } + indexConfig := map[string]interface{}{ + "index_mapping": indexMappingMap, + "index_type": indexType, + "index_storage_type": indexStorageType, + } + + // create logger + logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + + // create HTTP access logger + httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) + + // manager1 + manager1NodeId := "manager1" + manager1BindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager1BindAddr := fmt.Sprintf(":%d", manager1BindPort) + manager1GrpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager1GrpcAddr := fmt.Sprintf(":%d", manager1GrpcPort) + manager1HttpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager1HttpAddr := fmt.Sprintf(":%d", manager1HttpPort) + manager1DataDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(manager1DataDir) + if err != nil { + t.Errorf("%v", err) + } + }() + manager1PeerAddr := "" + manager1Metadata := map[string]interface{}{ 
+ "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + } + manager1, err := NewServer(manager1NodeId, manager1Metadata, manager1PeerAddr, indexConfig, logger.Named(manager1NodeId), httpAccessLogger) + defer func() { + manager1.Stop() + }() + if err != nil { + t.Errorf("%v", err) + } + + // manager2 + manager2NodeId := "manager2" + manager2BindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager2BindAddr := fmt.Sprintf(":%d", manager2BindPort) + manager2GrpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager2GrpcAddr := fmt.Sprintf(":%d", manager2GrpcPort) + manager2HttpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager2HttpAddr := fmt.Sprintf(":%d", manager2HttpPort) + manager2DataDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(manager2DataDir) + if err != nil { + t.Errorf("%v", err) + } + }() + manager2PeerAddr := manager1GrpcAddr + manager2Metadata := map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + } + manager2, err := NewServer(manager2NodeId, manager2Metadata, manager2PeerAddr, nil, logger.Named(manager2NodeId), httpAccessLogger) + defer func() { + manager2.Stop() + }() + if err != nil { + t.Errorf("%v", err) + } + + // manager3 + manager3NodeId := "manager3" + manager3BindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager3BindAddr := fmt.Sprintf(":%d", manager3BindPort) + manager3GrpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager3GrpcAddr := fmt.Sprintf(":%d", manager3GrpcPort) + manager3HttpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + manager3HttpAddr := fmt.Sprintf(":%d", manager3HttpPort) + manager3DataDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(manager3DataDir) + if err != nil { + t.Errorf("%v", err) + } + }() + manager3PeerAddr := manager1GrpcAddr + manager3Metadata := map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + } + manager3, err := NewServer(manager3NodeId, manager3Metadata, manager3PeerAddr, nil, logger.Named(manager3NodeId), httpAccessLogger) + defer func() { + manager3.Stop() + }() + if err != nil { + t.Errorf("%v", err) + } + + // start managers + manager1.Start() + + time.Sleep(5 * time.Second) + + manager2.Start() + + time.Sleep(5 * time.Second) + + manager3.Start() + + time.Sleep(5 * time.Second) + + // gRPC client for manager1 + client1, err := grpc.NewClient(manager1GrpcAddr) + defer func() { + _ = client1.Close() + }() + if err != nil { + t.Errorf("%v", err) + } + + // gRPC client for manager2 + client2, err := grpc.NewClient(manager2GrpcAddr) + defer func() { + _ = client2.Close() + }() + if err != nil { + t.Errorf("%v", err) + } + + // gRPC client for manager3 + client3, err := grpc.NewClient(manager3GrpcAddr) + defer func() { + _ = client3.Close() + }() + if err != nil { + t.Errorf("%v", err) + } + + // liveness check for manager1 + liveness1, err := client1.LivenessProbe() + if err != nil { + t.Errorf("%v", err) + } + expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() + actLiveness1 := liveness1 + if expLiveness1 != 
actLiveness1 { + t.Errorf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + } + + // liveness check for manager2 + liveness2, err := client2.LivenessProbe() + if err != nil { + t.Errorf("%v", err) + } + expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() + actLiveness2 := liveness2 + if expLiveness2 != actLiveness2 { + t.Errorf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + } + + // liveness check for manager3 + liveness3, err := client3.LivenessProbe() + if err != nil { + t.Errorf("%v", err) + } + expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() + actLiveness3 := liveness3 + if expLiveness3 != actLiveness3 { + t.Errorf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + } + + // readiness check for manager1 + readiness1, err := client1.ReadinessProbe() + if err != nil { + t.Errorf("%v", err) + } + expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() + actReadiness1 := readiness1 + if expReadiness1 != actReadiness1 { + t.Errorf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + } + + // readiness check for manager2 + readiness2, err := client2.ReadinessProbe() + if err != nil { + t.Errorf("%v", err) + } + expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() + actReadiness2 := readiness2 + if expReadiness2 != actReadiness2 { + t.Errorf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + } + + // readiness check for manager2 + readiness3, err := client3.ReadinessProbe() + if err != nil { + t.Errorf("%v", err) + } + expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() + actReadiness3 := readiness3 + if expReadiness3 != actReadiness3 { + t.Errorf("expected content to see %v, saw %v", expReadiness3, actReadiness3) + } + + // get manager1's node info from manager1 + node1_1, err := client1.GetNode(manager1NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode1_1 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + } + actNode1_1 := node1_1 + if !reflect.DeepEqual(expNode1_1, actNode1_1) { + t.Errorf("expected content to see %v, saw %v", expNode1_1, actNode1_1) + } + + // get manager2's node info from manager1 + node2_1, err := client1.GetNode(manager2NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode2_1 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + } + actNode2_1 := node2_1 + if !reflect.DeepEqual(expNode2_1, actNode2_1) { + t.Errorf("expected content to see %v, saw %v", expNode2_1, actNode2_1) + } + + // get manager3's node info from manager1 + node3_1, err := client1.GetNode(manager3NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode3_1 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + } + actNode3_1 := node3_1 + if !reflect.DeepEqual(expNode3_1, actNode3_1) { + t.Errorf("expected content to see %v, saw %v", expNode3_1, actNode3_1) + } + + // get manager1's node info from manager2 + node1_2, err := client2.GetNode(manager1NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode1_2 := 
map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + } + actNode1_2 := node1_2 + if !reflect.DeepEqual(expNode1_2, actNode1_2) { + t.Errorf("expected content to see %v, saw %v", expNode1_2, actNode1_2) + } + + // get manager2's node info from manager2 + node2_2, err := client2.GetNode(manager2NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode2_2 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + } + actNode2_2 := node2_2 + if !reflect.DeepEqual(expNode2_2, actNode2_2) { + t.Errorf("expected content to see %v, saw %v", expNode2_2, actNode2_2) + } + + // get manager3's node info from manager2 + node3_2, err := client2.GetNode(manager3NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode3_2 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + } + actNode3_2 := node3_2 + if !reflect.DeepEqual(expNode3_2, actNode3_2) { + t.Errorf("expected content to see %v, saw %v", expNode3_2, actNode3_2) + } + + // get manager1's node info from manager3 + nodeNode1_3, err := client3.GetNode(manager1NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode1_3 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + } + actNode1_3 := nodeNode1_3 + if !reflect.DeepEqual(expNode1_3, actNode1_3) { + t.Errorf("expected content to see %v, saw %v", expNode1_3, actNode1_3) + } + + // get manager2's node info from manager3 + node2_3, err := client3.GetNode(manager2NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode2_3 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + } + actNode2_3 := node2_3 + if !reflect.DeepEqual(expNode2_3, actNode2_3) { + t.Errorf("expected content to see %v, saw %v", expNode2_3, actNode2_3) + } + + // get manager3's node info from manager3 + node3_3, err := client3.GetNode(manager3NodeId) + if err != nil { + t.Errorf("%v", err) + } + expNode3_3 := map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + } + actNode3_3 := node3_3 + if !reflect.DeepEqual(expNode3_3, actNode3_3) { + t.Errorf("expected content to see %v, saw %v", expNode3_3, actNode3_3) + } + + // get cluster info from manager1 + cluster1, err := client1.GetCluster() + if err != nil { + t.Errorf("%v", err) + } + expCluster1 := map[string]interface{}{ + manager1NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + }, + manager2NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + 
"grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + }, + manager3NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + }, + } + actCluster1 := cluster1 + if !reflect.DeepEqual(expCluster1, actCluster1) { + t.Errorf("expected content to see %v, saw %v", expCluster1, actCluster1) + } + + // get cluster info from manager2 + cluster2, err := client2.GetCluster() + if err != nil { + t.Errorf("%v", err) + } + expCluster2 := map[string]interface{}{ + manager1NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + }, + manager2NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + }, + manager3NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + }, + } + actCluster2 := cluster2 + if !reflect.DeepEqual(expCluster2, actCluster2) { + t.Errorf("expected content to see %v, saw %v", expCluster2, actCluster2) + } + + // get cluster info from manager3 + cluster3, err := client3.GetCluster() + if err != nil { + t.Errorf("%v", err) + } + expCluster3 := map[string]interface{}{ + manager1NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager1BindAddr, + "grpc_addr": manager1GrpcAddr, + "http_addr": manager1HttpAddr, + "data_dir": manager1DataDir, + }, + "state": "Leader", + }, + manager2NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager2BindAddr, + "grpc_addr": manager2GrpcAddr, + "http_addr": manager2HttpAddr, + "data_dir": manager2DataDir, + }, + "state": "Follower", + }, + manager3NodeId: map[string]interface{}{ + "metadata": map[string]interface{}{ + "bind_addr": manager3BindAddr, + "grpc_addr": manager3GrpcAddr, + "http_addr": manager3HttpAddr, + "data_dir": manager3DataDir, + }, + "state": "Follower", + }, + } + actCluster3 := cluster3 + if !reflect.DeepEqual(expCluster3, actCluster3) { + t.Errorf("expected content to see %v, saw %v", expCluster3, actCluster3) + } + + // get index mapping from manager1 + indexMapping1, err := client1.GetState("index_config/index_mapping") + if err != nil { + t.Errorf("%v", err) + } + expIndexMapping1 := indexMappingMap + actIndexMapping1 := *indexMapping1.(*map[string]interface{}) + if !reflect.DeepEqual(expIndexMapping1, actIndexMapping1) { + t.Errorf("expected content to see %v, saw %v", expIndexMapping1, actIndexMapping1) + } + + // get index mapping from manager2 + indexMapping2, err := client2.GetState("index_config/index_mapping") + if err != nil { + t.Errorf("%v", err) + } + expIndexMapping2 := indexMappingMap + actIndexMapping2 := *indexMapping2.(*map[string]interface{}) + if !reflect.DeepEqual(expIndexMapping2, actIndexMapping2) { + t.Errorf("expected content to see %v, saw %v", expIndexMapping2, actIndexMapping2) + } + + // get index mapping from manager3 + indexMapping3, err := 
client3.GetState("index_config/index_mapping") + if err != nil { + t.Errorf("%v", err) + } + expIndexMapping3 := indexMappingMap + actIndexMapping3 := *indexMapping3.(*map[string]interface{}) + if !reflect.DeepEqual(expIndexMapping3, actIndexMapping3) { + t.Errorf("expected content to see %v, saw %v", expIndexMapping3, actIndexMapping3) + } + + // get index type from manager1 + indexType1, err := client1.GetState("index_config/index_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexType1 := indexType + actIndexType1 := *indexType1.(*string) + if expIndexType1 != actIndexType1 { + t.Errorf("expected content to see %v, saw %v", expIndexType1, actIndexType1) + } + + // get index type from manager2 + indexType2, err := client2.GetState("index_config/index_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexType2 := indexType + actIndexType2 := *indexType2.(*string) + if expIndexType2 != actIndexType2 { + t.Errorf("expected content to see %v, saw %v", expIndexType2, actIndexType2) + } + + // get index type from manager3 + indexType3, err := client2.GetState("index_config/index_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexType3 := indexType + actIndexType3 := *indexType3.(*string) + if expIndexType3 != actIndexType3 { + t.Errorf("expected content to see %v, saw %v", expIndexType3, actIndexType3) + } + + // get index storage type from manager1 + indexStorageType1, err := client1.GetState("index_config/index_storage_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexStorageType1 := indexStorageType + actIndexStorageType1 := *indexStorageType1.(*string) + if expIndexStorageType1 != actIndexStorageType1 { + t.Errorf("expected content to see %v, saw %v", expIndexStorageType1, actIndexStorageType1) + } + + // get index storage type from manager2 + indexStorageType2, err := client2.GetState("index_config/index_storage_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexStorageType2 := indexStorageType + actIndexStorageType2 := *indexStorageType2.(*string) + if expIndexStorageType2 != actIndexStorageType2 { + t.Errorf("expected content to see %v, saw %v", expIndexStorageType2, actIndexStorageType2) + } + + // get index storage type from manager3 + indexStorageType3, err := client3.GetState("index_config/index_storage_type") + if err != nil { + t.Errorf("%v", err) + } + expIndexStorageType3 := indexStorageType + actIndexStorageType3 := *indexStorageType3.(*string) + if expIndexStorageType3 != actIndexStorageType3 { + t.Errorf("expected content to see %v, saw %v", expIndexStorageType3, actIndexStorageType3) + } + + // set value to manager1 + err = client1.SetState("test/key1", "val1") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val1_1, err := client1.GetState("test/key1") + if err != nil { + t.Errorf("%v", err) + } + expVal1_1 := "val1" + actVal1_1 := *val1_1.(*string) + if expVal1_1 != actVal1_1 { + t.Errorf("expected content to see %v, saw %v", expVal1_1, actVal1_1) + } + + // get value from manager2 + val1_2, err := client2.GetState("test/key1") + if err != nil { + t.Errorf("%v", err) + } + expVal1_2 := "val1" + actVal1_2 := *val1_2.(*string) + if expVal1_2 != actVal1_2 { + t.Errorf("expected content to see %v, saw %v", expVal1_2, actVal1_2) + } + + // get value from manager3 + val1_3, err := client3.GetState("test/key1") + if err != nil { + t.Errorf("%v", err) + } + expVal1_3 := "val1" + actVal1_3 := *val1_3.(*string) + if expVal1_3 != actVal1_3 { + 
t.Errorf("expected content to see %v, saw %v", expVal1_3, actVal1_3) + } + + // set value to manager2 + err = client2.SetState("test/key2", "val2") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val2_1, err := client1.GetState("test/key2") + if err != nil { + t.Errorf("%v", err) + } + expVal2_1 := "val2" + actVal2_1 := *val2_1.(*string) + if expVal2_1 != actVal2_1 { + t.Errorf("expected content to see %v, saw %v", expVal2_1, actVal2_1) + } + + // get value from manager2 + val2_2, err := client2.GetState("test/key2") + if err != nil { + t.Errorf("%v", err) + } + expVal2_2 := "val2" + actVal2_2 := *val2_2.(*string) + if expVal2_2 != actVal2_2 { + t.Errorf("expected content to see %v, saw %v", expVal2_2, actVal2_2) + } + + // get value from manager3 + val2_3, err := client3.GetState("test/key2") + if err != nil { + t.Errorf("%v", err) + } + expVal2_3 := "val2" + actVal2_3 := *val2_3.(*string) + if expVal2_3 != actVal2_3 { + t.Errorf("expected content to see %v, saw %v", expVal2_3, actVal2_3) + } + + // set value to manager3 + err = client3.SetState("test/key3", "val3") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val3_1, err := client1.GetState("test/key3") + if err != nil { + t.Errorf("%v", err) + } + expVal3_1 := "val3" + actVal3_1 := *val3_1.(*string) + if expVal3_1 != actVal3_1 { + t.Errorf("expected content to see %v, saw %v", expVal3_1, actVal3_1) + } + + // get value from manager2 + val3_2, err := client2.GetState("test/key3") + if err != nil { + t.Errorf("%v", err) + } + expVal3_2 := "val3" + actVal3_2 := *val3_2.(*string) + if expVal3_2 != actVal3_2 { + t.Errorf("expected content to see %v, saw %v", expVal3_2, actVal3_2) + } + + // get value from manager3 + val3_3, err := client3.GetState("test/key3") + if err != nil { + t.Errorf("%v", err) + } + expVal3_3 := "val3" + actVal3_3 := *val3_3.(*string) + if expVal3_3 != actVal3_3 { + t.Errorf("expected content to see %v, saw %v", expVal3_3, actVal3_3) + } + + // delete value from manager1 + err = client1.DeleteState("test/key1") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val1_1, err = client1.GetState("test/key1") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val1_1 != nil { + t.Errorf("%v", err) + } + + // get value from manager2 + val1_2, err = client2.GetState("test/key1") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val1_2 != nil { + t.Errorf("%v", err) + } + + // get value from manager3 + val1_3, err = client3.GetState("test/key1") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val1_3 != nil { + t.Errorf("%v", err) + } + + // delete value from manager2 + err = client2.DeleteState("test/key2") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val2_1, err = client1.GetState("test/key2") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val2_1 != nil { + t.Errorf("%v", err) + } + + // get value from manager2 + val2_2, err = client2.GetState("test/key2") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val2_2 != nil { + t.Errorf("%v", err) + } + + // get value from manager2 + val2_3, err = client3.GetState("test/key2") + if err != blasterrors.ErrNotFound { + 
t.Errorf("%v", err) + } + if val2_3 != nil { + t.Errorf("%v", err) + } + + // delete value from manager3 + err = client3.DeleteState("test/key3") + if err != nil { + t.Errorf("%v", err) + } + + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from manager1 + val3_1, err = client1.GetState("test/key3") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val3_1 != nil { + t.Errorf("%v", err) + } + + // get value from manager2 + val3_2, err = client2.GetState("test/key3") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val3_2 != nil { + t.Errorf("%v", err) + } + + // get value from manager3 + val3_3, err = client3.GetState("test/key3") + if err != blasterrors.ErrNotFound { + t.Errorf("%v", err) + } + if val3_3 != nil { + t.Errorf("%v", err) + } + + // delete non-existing data from manager1 + err = client1.DeleteState("test/non-existing") + if err == nil { + t.Errorf("%v", err) + } + + // delete non-existing data from manager2 + err = client2.DeleteState("test/non-existing") + if err == nil { + t.Errorf("%v", err) + } + + // delete non-existing data from manager3 + err = client3.DeleteState("test/non-existing") + if err == nil { + t.Errorf("%v", err) + } +} From ffd9c9fcae1860d1fb9c2f65d010a90172d1ab79 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 2 Jul 2019 14:28:31 +0900 Subject: [PATCH 6/9] Add test --- indexer/server_test.go | 277 ++++++++++++++++++++++++++--------------- manager/server_test.go | 90 ++++++------- 2 files changed, 226 insertions(+), 141 deletions(-) diff --git a/indexer/server_test.go b/indexer/server_test.go index b623ff0..2103ca6 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -24,91 +24,19 @@ import ( "testing" "time" + "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/testutils" ) -func TestSingleNode(t *testing.T) { - tmpDir, err := testutils.TmpDir() - if err != nil { - t.Errorf("%v", err) - } - defer func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Errorf("%v", err) - } - }() - +func TestIndexserStandalone(t *testing.T) { curDir, _ := os.Getwd() - logLevel := "DEBUG" - logFilename := "" - logMaxSize := 500 - logMaxBackups := 3 - logMaxAge := 30 - logCompress := false - - httpAccessLogFilename := "" - httpAccessLogMaxSize := 500 - httpAccessLogMaxBackups := 3 - httpAccessLogMaxAge := 30 - httpAccessLogCompress := false - - nodeId := "indexer1" - bindPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - bindAddr := fmt.Sprintf(":%d", bindPort) - grpcPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - grpcAddr := fmt.Sprintf(":%d", grpcPort) - httpPort, err := testutils.TmpPort() - if err != nil { - t.Errorf("%v", err) - } - httpAddr := fmt.Sprintf(":%d", httpPort) - dataDir := filepath.Join(tmpDir, "store") - peerAddr := "" - indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") - indexType := "upside_down" - indexStorageType := "boltdb" - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, - ) - - // metadata - 
metadata := map[string]interface{}{ - "bind_addr": bindAddr, - "grpc_addr": grpcAddr, - "http_addr": httpAddr, - "data_dir": dataDir, - } - - // index mapping indexMapping := mapping.NewIndexMapping() if indexMappingFile != "" { _, err := os.Stat(indexMappingFile) @@ -135,29 +63,76 @@ func TestSingleNode(t *testing.T) { t.Errorf("%v", err) } } - err = indexMapping.Validate() + err := indexMapping.Validate() if err != nil { t.Errorf("%v", err) } - - // IndexMappingImpl -> JSON indexMappingJSON, err := json.Marshal(indexMapping) if err != nil { t.Errorf("%v", err) } - // JSON -> map[string]interface{} var indexMappingMap map[string]interface{} err = json.Unmarshal(indexMappingJSON, &indexMappingMap) if err != nil { t.Errorf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" + indexConfig := map[string]interface{}{ "index_mapping": indexMappingMap, "index_type": indexType, "index_storage_type": indexStorageType, } + // create logger + logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + + // create HTTP access logger + httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) + + nodeId := "indexer1" + + bindPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + bindAddr := fmt.Sprintf(":%d", bindPort) + + grpcPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + grpcAddr := fmt.Sprintf(":%d", grpcPort) + + httpPort, err := testutils.TmpPort() + if err != nil { + t.Errorf("%v", err) + } + httpAddr := fmt.Sprintf(":%d", httpPort) + + dataDir, err := testutils.TmpDir() + if err != nil { + t.Errorf("%v", err) + } + defer func() { + err := os.RemoveAll(dataDir) + if err != nil { + t.Errorf("%v", err) + } + }() + + peerAddr := "" + + // metadata + metadata := map[string]interface{}{ + "bind_addr": bindAddr, + "grpc_addr": grpcAddr, + "http_addr": httpAddr, + "data_dir": dataDir, + } + server, err := NewServer("", "", nodeId, metadata, peerAddr, indexConfig, logger, httpAccessLogger) defer func() { server.Stop() @@ -208,7 +183,7 @@ func TestSingleNode(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp3 := map[string]interface{}{ + expNode := map[string]interface{}{ "metadata": map[string]interface{}{ "bind_addr": bindAddr, "grpc_addr": grpcAddr, @@ -217,9 +192,9 @@ func TestSingleNode(t *testing.T) { }, "state": "Leader", } - act3 := node - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + actNode := node + if !reflect.DeepEqual(expNode, actNode) { + t.Errorf("expected content to see %v, saw %v", expNode, actNode) } // get cluster @@ -227,7 +202,7 @@ func TestSingleNode(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp4 := map[string]interface{}{ + expCluster := map[string]interface{}{ nodeId: map[string]interface{}{ "metadata": map[string]interface{}{ "bind_addr": bindAddr, @@ -238,28 +213,28 @@ func TestSingleNode(t *testing.T) { "state": "Leader", }, } - act4 := cluster - if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) + actCluster := cluster + if !reflect.DeepEqual(expCluster, actCluster) { + t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) } // get index config - val5, err := client.GetIndexConfig() + indexConfig1, err := client.GetIndexConfig() if err != nil { t.Errorf("%v", err) } - exp5 := indexConfig - act5 := val5 - if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) + expIndexConfig := indexConfig + 
actIndexConfig := indexConfig1 + if !reflect.DeepEqual(expIndexConfig, actIndexConfig) { + t.Errorf("expected content to see %v, saw %v", expIndexConfig, actIndexConfig) } // get index stats - val6, err := client.GetIndexStats() + indexStats, err := client.GetIndexStats() if err != nil { t.Errorf("%v", err) } - exp6 := map[string]interface{}{ + expIndexStats := map[string]interface{}{ "index": map[string]interface{}{ "analysis_time": float64(0), "batches": float64(0), @@ -274,8 +249,116 @@ func TestSingleNode(t *testing.T) { "search_time": float64(0), "searches": float64(0), } - act6 := val6 - if !reflect.DeepEqual(exp6, act6) { - t.Errorf("expected content to see %v, saw %v", exp6, act6) + actIndexStats := indexStats + if !reflect.DeepEqual(expIndexStats, actIndexStats) { + t.Errorf("expected content to see %v, saw %v", expIndexStats, actIndexStats) + } + + // put document + endikiDocs := make([]map[string]interface{}, 0) + enwiki1fieldsPath := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + // read index mapping file + enwiki1FieldsFile, err := os.Open(enwiki1fieldsPath) + if err != nil { + t.Errorf("%v", err) + } + defer func() { + _ = enwiki1FieldsFile.Close() + }() + + enwiki1FieldsByte, err := ioutil.ReadAll(enwiki1FieldsFile) + if err != nil { + t.Errorf("%v", err) + } + + var enwiki1Fields map[string]interface{} + err = json.Unmarshal(enwiki1FieldsByte, &enwiki1Fields) + if err != nil { + t.Errorf("%v", err) + } + enwiki1Doc := map[string]interface{}{ + "id": "enwiki_1", + "fields": enwiki1Fields, + } + endikiDocs = append(endikiDocs, enwiki1Doc) + count, err := client.IndexDocument(endikiDocs) + if err != nil { + t.Errorf("%v", err) + } + expCount := 1 + actCount := count + if expCount != actCount { + t.Errorf("expected content to see %v, saw %v", expCount, actCount) + } + + // get document + fields1, err := client.GetDocument("enwiki_1") + if err != nil { + t.Errorf("%v", err) + } + expFields := enwiki1Fields + actFields := fields1 + if !reflect.DeepEqual(expFields, actFields) { + t.Errorf("expected content to see %v, saw %v", expFields, actFields) + } + + // search + searchRequestPath := filepath.Join(curDir, "../example/wiki_search_request.json") + + searchRequestFile, err := os.Open(searchRequestPath) + if err != nil { + t.Errorf("%v", err) + } + defer func() { + _ = searchRequestFile.Close() + }() + + searchRequestByte, err := ioutil.ReadAll(searchRequestFile) + if err != nil { + t.Errorf("%v", err) + } + + searchRequest := bleve.NewSearchRequest(nil) + err = json.Unmarshal(searchRequestByte, searchRequest) + if err != nil { + t.Errorf("%v", err) + } + + searchResult1, err := client.Search(searchRequest) + if err != nil { + t.Errorf("%v", err) + } + expTotal := uint64(1) + actTotal := searchResult1.Total + if expTotal != actTotal { + t.Errorf("expected content to see %v, saw %v", expTotal, actTotal) + } + + // delete document + count, err = client.DeleteDocument([]string{"enwiki_1"}) + if err != nil { + t.Errorf("%v", err) + } + expCount = 1 + actCount = count + if expCount != actCount { + t.Errorf("expected content to see %v, saw %v", expIndexConfig, actIndexConfig) + } + + // get document + fields1, err = client.GetDocument("enwiki_1") + if err != errors.ErrNotFound { + t.Errorf("%v", err) + } + + // search + searchResult1, err = client.Search(searchRequest) + if err != nil { + t.Errorf("%v", err) + } + expTotal = uint64(0) + actTotal = searchResult1.Total + if expTotal != actTotal { + t.Errorf("expected content to see %v, saw %v", expTotal, actTotal) } } diff 
--git a/manager/server_test.go b/manager/server_test.go index 38bb608..d9e0d02 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -32,13 +32,11 @@ import ( "github.com/mosuka/blast/testutils" ) -func TestStandalone(t *testing.T) { +func TestManagerStandalone(t *testing.T) { curDir, _ := os.Getwd() // index config indexMappingFile := filepath.Join(curDir, "../example/wiki_index_mapping.json") - indexType := "upside_down" - indexStorageType := "boltdb" indexMapping := mapping.NewIndexMapping() if indexMappingFile != "" { _, err := os.Stat(indexMappingFile) @@ -78,6 +76,10 @@ func TestStandalone(t *testing.T) { if err != nil { t.Errorf("%v", err) } + + indexType := "upside_down" + indexStorageType := "boltdb" + indexConfig := map[string]interface{}{ "index_mapping": indexMappingMap, "index_type": indexType, @@ -161,10 +163,10 @@ func TestStandalone(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp1 := protobuf.LivenessProbeResponse_ALIVE.String() - act1 := liveness - if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + expLiveness := protobuf.LivenessProbeResponse_ALIVE.String() + actLiveness := liveness + if expLiveness != actLiveness { + t.Errorf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness @@ -172,10 +174,10 @@ func TestStandalone(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp2 := protobuf.ReadinessProbeResponse_READY.String() - act2 := readiness - if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + expReadiness := protobuf.ReadinessProbeResponse_READY.String() + actReadiness := readiness + if expLiveness != actLiveness { + t.Errorf("expected content to see %v, saw %v", expReadiness, actReadiness) } // get node @@ -183,7 +185,7 @@ func TestStandalone(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp3 := map[string]interface{}{ + expNode := map[string]interface{}{ "metadata": map[string]interface{}{ "bind_addr": bindAddr, "grpc_addr": grpcAddr, @@ -192,9 +194,9 @@ func TestStandalone(t *testing.T) { }, "state": "Leader", } - act3 := node - if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + actNode := node + if !reflect.DeepEqual(expNode, actNode) { + t.Errorf("expected content to see %v, saw %v", expNode, actNode) } // get cluster @@ -202,7 +204,7 @@ func TestStandalone(t *testing.T) { if err != nil { t.Errorf("%v", err) } - exp4 := map[string]interface{}{ + expCluster := map[string]interface{}{ nodeId: map[string]interface{}{ "metadata": map[string]interface{}{ "bind_addr": bindAddr, @@ -213,69 +215,69 @@ func TestStandalone(t *testing.T) { "state": "Leader", }, } - act4 := cluster - if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) + actCluster := cluster + if !reflect.DeepEqual(expCluster, actCluster) { + t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) } // get index mapping - val5, err := client.GetState("index_config/index_mapping") + indexMapping1, err := client.GetState("index_config/index_mapping") if err != nil { t.Errorf("%v", err) } - exp5 := indexMappingMap - act5 := *val5.(*map[string]interface{}) - if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) + expIndexMapping := indexMappingMap + actIndexMapping := *indexMapping1.(*map[string]interface{}) + if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { + t.Errorf("expected content to see %v, saw %v", expIndexMapping, 
actIndexMapping) } // get index type - val6, err := client.GetState("index_config/index_type") + indexType1, err := client.GetState("index_config/index_type") if err != nil { t.Errorf("%v", err) } - exp6 := indexType - act6 := *val6.(*string) - if exp6 != act6 { - t.Errorf("expected content to see %v, saw %v", exp6, act6) + expIndexType := indexType + actIndexType := *indexType1.(*string) + if expIndexType != actIndexType { + t.Errorf("expected content to see %v, saw %v", expIndexType, actIndexType) } // get index storage type - val7, err := client.GetState("index_config/index_storage_type") + indexStorageType1, err := client.GetState("index_config/index_storage_type") if err != nil { t.Errorf("%v", err) } - exp7 := indexStorageType - act7 := *val7.(*string) - if exp7 != act7 { - t.Errorf("expected content to see %v, saw %v", exp7, act7) + expIndexStorageType := indexStorageType + actIndexStorageType := *indexStorageType1.(*string) + if expIndexStorageType != actIndexStorageType { + t.Errorf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) } // set value - err = client.SetState("test/key8", "val8") + err = client.SetState("test/key1", "val1") if err != nil { t.Errorf("%v", err) } - val8, err := client.GetState("test/key8") + val1, err := client.GetState("test/key1") if err != nil { t.Errorf("%v", err) } - exp8 := "val8" - act8 := *val8.(*string) - if exp8 != act8 { - t.Errorf("expected content to see %v, saw %v", exp8, act8) + expVal1 := "val1" + actVal1 := *val1.(*string) + if expVal1 != actVal1 { + t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) } // delete value - err = client.DeleteState("test/key8") + err = client.DeleteState("test/key1") if err != nil { t.Errorf("%v", err) } - val9, err := client.GetState("test/key8") + val1, err = client.GetState("test/key1") if err != blasterrors.ErrNotFound { t.Errorf("%v", err) } - if val9 != nil { + if val1 != nil { t.Errorf("%v", err) } @@ -286,7 +288,7 @@ func TestStandalone(t *testing.T) { } } -func TestCluster(t *testing.T) { +func TestManagerCluster(t *testing.T) { curDir, _ := os.Getwd() // index config From 67a9a31c4ab37ea4eae2338714aafabd59a8d442 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 2 Jul 2019 15:44:51 +0900 Subject: [PATCH 7/9] Fix bug --- indexer/grpc_service.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index c270151..5d857b3 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -17,6 +17,7 @@ package indexer import ( "context" "errors" + "fmt" "io" "reflect" "sync" @@ -509,6 +510,18 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { c <- *resp } + // update cluster config to manager if leader + if s.raftServer.IsLeader() { + client, err := s.getManagerClient() + if err != nil { + s.logger.Error(err.Error()) + } + err = client.SetState(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterId), cluster) + if err != nil { + s.logger.Error(err.Error()) + } + } + // keep current cluster s.cluster = cluster s.logger.Debug("cluster", zap.Any("cluster", cluster)) @@ -654,6 +667,7 @@ func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } + } return resp, nil From bec89ce6ae0be1c19caa7a01333aacf1de23d798 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 2 Jul 2019 20:23:09 +0900 Subject: [PATCH 8/9] Add client retry --- grpc/client.go | 12 
+++++++++++- indexer/grpc_service.go | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/grpc/client.go b/grpc/client.go index dcc5b04..0e4f853 100644 --- a/grpc/client.go +++ b/grpc/client.go @@ -18,10 +18,12 @@ import ( "context" "errors" "math" + "time" "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" "google.golang.org/grpc" @@ -38,18 +40,26 @@ type Client struct { func NewContext() (context.Context, context.CancelFunc) { baseCtx := context.TODO() - return context.WithCancel(baseCtx) + return context.WithTimeout(baseCtx, 60*time.Second) } func NewClient(address string) (*Client, error) { ctx, cancel := NewContext() + retryOpts := []grpc_retry.CallOption{ + grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), + grpc_retry.WithCodes(codes.Unavailable), + grpc_retry.WithMax(100), + } + dialOpts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithDefaultCallOptions( grpc.MaxCallSendMsgSize(math.MaxInt32), grpc.MaxCallRecvMsgSize(math.MaxInt32), ), + grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)), + grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)), } conn, err := grpc.DialContext(ctx, address, dialOpts...) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 5d857b3..cc4210a 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -510,7 +510,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { c <- *resp } - // update cluster config to manager if leader + // update the cluster config to manager if it is a leader if s.raftServer.IsLeader() { client, err := s.getManagerClient() if err != nil { From 198e7ae569e44eb1ac7e3beb8d1a62f3132afb30 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 3 Jul 2019 01:37:57 +0900 Subject: [PATCH 9/9] Add client retry --- grpc/client.go | 10 +++++++--- indexer/grpc_service.go | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/grpc/client.go b/grpc/client.go index 0e4f853..4d9c897 100644 --- a/grpc/client.go +++ b/grpc/client.go @@ -46,7 +46,11 @@ func NewContext() (context.Context, context.CancelFunc) { func NewClient(address string) (*Client, error) { ctx, cancel := NewContext() - retryOpts := []grpc_retry.CallOption{ + streamRetryOpts := []grpc_retry.CallOption{ + grpc_retry.Disable(), + } + + unaryRetryOpts := []grpc_retry.CallOption{ grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), grpc_retry.WithCodes(codes.Unavailable), grpc_retry.WithMax(100), @@ -58,8 +62,8 @@ func NewClient(address string) (*Client, error) { grpc.MaxCallSendMsgSize(math.MaxInt32), grpc.MaxCallRecvMsgSize(math.MaxInt32), ), - grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)), - grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)), + grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), + grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), } conn, err := grpc.DialContext(ctx, address, dialOpts...) 
diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index cc4210a..430f48a 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -511,7 +511,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { } // update the cluster config to manager if it is a leader - if s.raftServer.IsLeader() { + if s.managerAddr != "" && s.raftServer.IsLeader() { client, err := s.getManagerClient() if err != nil { s.logger.Error(err.Error())
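
To summarize the retry behavior that patches 8/9 and 9/9 introduce in grpc/client.go: NewContext now carries a 60-second timeout instead of a plain cancelable context, unary calls are retried on codes.Unavailable with a linear 100 ms backoff capped at 100 attempts, and retries are disabled entirely for streaming calls. The following is a minimal, self-contained sketch of the resulting dial options for readers who want to see the final state in one place; it is illustrative only, and the target address in main is a placeholder.

// Sketch of the gRPC dial options after patches 8/9 and 9/9:
// unary calls retry on Unavailable with linear backoff, streams do not retry.
package main

import (
	"context"
	"log"
	"math"
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func dial(address string) (*grpc.ClientConn, error) {
	// Patch 8/9 changes NewContext to a 60-second timeout.
	ctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second)
	defer cancel()

	// Patch 9/9 disables retries for streaming calls.
	streamRetryOpts := []grpc_retry.CallOption{
		grpc_retry.Disable(),
	}

	// Unary calls retry only on Unavailable, with linear 100 ms backoff,
	// up to 100 attempts (values taken from the patches).
	unaryRetryOpts := []grpc_retry.CallOption{
		grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)),
		grpc_retry.WithCodes(codes.Unavailable),
		grpc_retry.WithMax(100),
	}

	dialOpts := []grpc.DialOption{
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallSendMsgSize(math.MaxInt32),
			grpc.MaxCallRecvMsgSize(math.MaxInt32),
		),
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)),
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)),
	}

	return grpc.DialContext(ctx, address, dialOpts...)
}

func main() {
	// Placeholder address for illustration only.
	conn, err := dial("127.0.0.1:5001")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = conn.Close() }()
}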