run 'go get -u; make revendor'
Signed-off-by: Stephan Renatus <srenatus@chef.io>
3  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore  generated  vendored
@@ -1,3 +1,6 @@
#vendor
vendor/

# Created by .ignore support plugin (hsz.mobi)
coverage.txt
### Go template
10  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml  generated  vendored
@@ -1,9 +1,16 @@
sudo: false
language: go
# * github.com/grpc/grpc-go still supports go1.6
# - When we drop support for go1.6 we can remove golang.org/x/net/context
# below as it is part of the Go std library since go1.7
# * github.com/prometheus/client_golang already requires at least go1.7 since
# September 2017
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- master

install:
@@ -11,9 +18,8 @@ install:
- go get google.golang.org/grpc
- go get golang.org/x/net/context
- go get github.com/stretchr/testify

script:
- ./test_all.sh
- make test

after_success:
- bash <(curl -s https://codecov.io/bash)
24  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md  generated  vendored  Normal file
@@ -0,0 +1,24 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04

### Added

* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
* Support non-default/global Prometheus registry.
* Allow configuring counters with `prometheus.CounterOpts`.

### Changed

* Remove usage of deprecated `grpc.Code()`.
* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.

---

This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
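The v1.2.0 features listed in this changelog (Collector support, non-default registries, counter options) show up in caller code roughly as below. This is a minimal sketch rather than code from this repository; the registry and server wiring are illustrative only.

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func main() {
	// A dedicated registry instead of the global default one.
	reg := prometheus.NewRegistry()

	// ServerMetrics implements prometheus.Collector, so it registers like
	// any other collector.
	metrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(metrics)

	// Wire the interceptors from the metrics instance rather than the
	// package-level defaults.
	server := grpc.NewServer(
		grpc.UnaryInterceptor(metrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(metrics.StreamServerInterceptor()),
	)
	_ = server // register services and serve as usual
}
```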
16  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md  generated  vendored
@@ -49,8 +49,8 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
...
clientConn, err = grpc.Dial(
address,
grpc.WithUnaryInterceptor(UnaryClientInterceptor),
grpc.WithStreamInterceptor(StreamClientInterceptor)
grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor)
)
client = pb_testproto.NewTestServiceClient(clientConn)
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
@@ -118,7 +118,7 @@ each of the 20 messages sent back, a counter will be incremented:
grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
```

After the call completes, it's status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
and the relevant call labels increment the `grpc_server_handled_total` counter.

```jsoniq
@@ -128,8 +128,8 @@ grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mw
## Histograms

[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
to measure latency distributions of your RPCs. However since it is bad practice to have metrics
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels))
to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels)
the latency monitoring metrics are disabled by default. To enable them please call the following
in your server initialization code:

@@ -137,8 +137,8 @@ in your server initialization code:
grpc_prometheus.EnableHandlingTimeHistogram()
```

After the call completes, it's handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
variable `grpc_server_handling_seconds`. It contains three sub-metrics:
After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:

* `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
* `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
@@ -168,7 +168,7 @@ grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_se

## Useful query examples

Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
Prometheus philosophy is to provide raw metrics to the monitoring system, and
let the aggregations be handled there. The verbosity of above metrics make it possible to have that
flexibility. Here's a couple of useful monitoring queries:
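The README excerpt above shows the client-side interceptors and the opt-in handling-time histogram. For context, a server wired up with the package-level defaults looks roughly like this; a sketch, with service registration elided:

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func newInstrumentedServer() *grpc.Server {
	server := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)

	// Histograms are disabled by default because of their cardinality cost;
	// this opt-in enables grpc_server_handling_seconds.
	grpc_prometheus.EnableHandlingTimeHistogram()

	// ... register gRPC services on server here ...

	// Pre-initializes the per-method counters to zero for the registered services.
	grpc_prometheus.Register(server)
	return server
}

func main() { _ = newInstrumentedServer() }
```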
31  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go  generated  vendored
@@ -7,6 +7,7 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// ClientMetrics represents a collection of metrics to be registered on a
@@ -25,31 +26,32 @@ type ClientMetrics struct {
// ClientMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewClientMetrics() *ClientMetrics {
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
opts := counterOptions(counterOpts)
return &ClientMetrics{
clientStartedCounter: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_client_started_total",
Help: "Total number of RPCs started on the client.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),

clientHandledCounter: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_client_handled_total",
Help: "Total number of RPCs completed by the client, regardless of success or failure.",
}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),

clientStreamMsgReceived: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_client_msg_received_total",
Help: "Total number of RPC stream messages received by the client.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),

clientStreamMsgSent: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_client_msg_sent_total",
Help: "Total number of gRPC stream messages sent by the client.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),

clientHandledHistogramEnabled: false,
clientHandledHistogramOpts: prom.HistogramOpts{
@@ -111,18 +113,20 @@ func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, metho
if err != nil {
monitor.ReceivedMessage()
}
monitor.Handled(grpc.Code(err))
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return err
}
}

// StreamServerInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
monitor := newClientReporter(m, clientStreamType(desc), method)
clientStream, err := streamer(ctx, desc, cc, method, opts...)
if err != nil {
monitor.Handled(grpc.Code(err))
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return nil, err
}
return &monitoredClientStream{clientStream, monitor}, nil
@@ -159,7 +163,8 @@ func (s *monitoredClientStream) RecvMsg(m interface{}) error {
} else if err == io.EOF {
s.monitor.Handled(codes.OK)
} else {
s.monitor.Handled(grpc.Code(err))
st, _ := status.FromError(err)
s.monitor.Handled(st.Code())
}
return err
}
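Every hunk in this file swaps the deprecated grpc.Code(err) for status.FromError(err). The standalone snippet below (not part of the vendored code) illustrates why the new form needs no nil check: FromError accepts a nil error and the resulting status reports codes.OK.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// nil error: the returned status's Code() is codes.OK, so the
	// interceptors can call it unconditionally on the success path.
	st, _ := status.FromError(nil)
	fmt.Println(st.Code() == codes.OK) // true

	// A real gRPC status error round-trips its code.
	err := status.Errorf(codes.NotFound, "no such thing")
	st, _ = status.FromError(err)
	fmt.Println(st.Code()) // NotFound
}
```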
16  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile  generated  vendored  Normal file
@@ -0,0 +1,16 @@
SHELL="/bin/bash"

GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)

all: vet fmt test

fmt:
go fmt $(GOFILES_NOVENDOR)

vet:
go vet $(GOFILES_NOVENDOR)

test: vet
./scripts/test_all.sh

.PHONY: all vet test
41  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go  generated  vendored  Normal file
@@ -0,0 +1,41 @@
package grpc_prometheus

import (
prom "github.com/prometheus/client_golang/prometheus"
)

// A CounterOption lets you add options to Counter metrics using With* funcs.
type CounterOption func(*prom.CounterOpts)

type counterOptions []CounterOption

func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
for _, f := range co {
f(&o)
}
return o
}

// WithConstLabels allows you to add ConstLabels to Counter metrics.
func WithConstLabels(labels prom.Labels) CounterOption {
return func(o *prom.CounterOpts) {
o.ConstLabels = labels
}
}

// A HistogramOption lets you add options to Histogram metrics using With*
// funcs.
type HistogramOption func(*prom.HistogramOpts)

// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}

// WithHistogramConstLabels allows you to add custom ConstLabels to
// histograms metrics.
func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
return func(o *prom.HistogramOpts) {
o.ConstLabels = labels
}
}
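metric_options.go introduces the functional options consumed by the new constructor signatures and by EnableHandlingTimeHistogram. A usage sketch follows; the bucket values and labels are arbitrary examples, not defaults from the library.

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// CounterOption values are applied to every CounterOpts the constructor builds.
	metrics := grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithConstLabels(prometheus.Labels{"region": "eu-west-1"}),
	)

	// HistogramOption values configure the opt-in handling-time histogram.
	metrics.EnableHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.005, 0.05, 0.5, 5}),
		grpc_prometheus.WithHistogramConstLabels(prometheus.Labels{"region": "eu-west-1"}),
	)

	prometheus.MustRegister(metrics)
}
```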
57  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go  generated  vendored
@@ -4,6 +4,7 @@ import (
prom "github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)

// ServerMetrics represents a collection of metrics to be registered on a
@@ -22,28 +23,29 @@ type ServerMetrics struct {
// ServerMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewServerMetrics() *ServerMetrics {
func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
opts := counterOptions(counterOpts)
return &ServerMetrics{
serverStartedCounter: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_server_started_total",
Help: "Total number of RPCs started on the server.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverHandledCounter: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_server_handled_total",
Help: "Total number of RPCs completed on the server, regardless of success or failure.",
}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
serverStreamMsgReceived: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_server_msg_received_total",
Help: "Total number of RPC stream messages received on the server.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverStreamMsgSent: prom.NewCounterVec(
prom.CounterOpts{
opts.apply(prom.CounterOpts{
Name: "grpc_server_msg_sent_total",
Help: "Total number of gRPC stream messages sent by the server.",
}, []string{"grpc_type", "grpc_service", "grpc_method"}),
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverHandledHistogramEnabled: false,
serverHandledHistogramOpts: prom.HistogramOpts{
Name: "grpc_server_handling_seconds",
@@ -54,13 +56,6 @@ func NewServerMetrics() *ServerMetrics {
}
}

type HistogramOption func(*prom.HistogramOpts)

// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}

// EnableHandlingTimeHistogram enables histograms being registered when
// registering the ServerMetrics on a Prometheus registry. Histograms can be
// expensive on Prometheus servers. It takes options to configure histogram
@@ -110,7 +105,8 @@ func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req i
monitor := newServerReporter(m, Unary, info.FullMethod)
monitor.ReceivedMessage()
resp, err := handler(ctx, req)
monitor.Handled(grpc.Code(err))
st, _ := status.FromError(err)
monitor.Handled(st.Code())
if err == nil {
monitor.SentMessage()
}
@@ -121,9 +117,10 @@ func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req i
// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
monitor := newServerReporter(m, streamRpcType(info), info.FullMethod)
monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
err := handler(srv, &monitoredServerStream{ss, monitor})
monitor.Handled(grpc.Code(err))
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return err
}
}
@@ -140,27 +137,7 @@ func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
}
}

// Register registers all server metrics in a given metrics registry. Depending
// on histogram options and whether they are enabled, histogram metrics are
// also registered.
//
// Deprecated: ServerMetrics implements Prometheus Collector interface. You can
// register an instance of ServerMetrics directly by using
// prometheus.Register(m).
func (m *ServerMetrics) Register(r prom.Registerer) error {
return r.Register(m)
}

// MustRegister tries to register all server metrics and panics on an error.
//
// Deprecated: ServerMetrics implements Prometheus Collector interface. You can
// register an instance of ServerMetrics directly by using
// prometheus.MustRegister(m).
func (m *ServerMetrics) MustRegister(r prom.Registerer) {
r.MustRegister(m)
}

func streamRpcType(info *grpc.StreamServerInfo) grpcType {
func streamRPCType(info *grpc.StreamServerInfo) grpcType {
if info.IsClientStream && !info.IsServerStream {
return ClientStream
} else if !info.IsClientStream && info.IsServerStream {
14  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/test_all.sh  generated  vendored
@@ -1,14 +0,0 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
echo -e "TESTS FOR: for \033[0;35m${d}\033[0m"
go test -race -v -coverprofile=profile.coverage.out -covermode=atomic $d
if [ -f profile.coverage.out ]; then
cat profile.coverage.out >> coverage.txt
rm profile.coverage.out
fi
echo ""
done
6  vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go  generated  vendored
@@ -37,13 +37,13 @@ func splitMethodName(fullMethodName string) (string, string) {
}

func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
if mInfo.IsClientStream == false && mInfo.IsServerStream == false {
if !mInfo.IsClientStream && !mInfo.IsServerStream {
return Unary
}
if mInfo.IsClientStream == true && mInfo.IsServerStream == false {
if mInfo.IsClientStream && !mInfo.IsServerStream {
return ClientStream
}
if mInfo.IsClientStream == false && mInfo.IsServerStream == true {
if !mInfo.IsClientStream && mInfo.IsServerStream {
return ServerStream
}
return BidiStream