
Merge pull request #599 from bogo-y/fix
Fix metric unregistered
k8s-ci-robot authored Mar 26, 2024
2 parents 9156bf3 + a5bcb39 commit b48bff4
Showing 2 changed files with 37 additions and 9 deletions.
12 changes: 12 additions & 0 deletions cmd/adapter/adapter.go
@@ -271,11 +271,23 @@ func (cmd *PrometheusAdapter) addResourceMetricsAPI(promClient prom.Client, stop
 		return err
 	}
 
+	config, err := cmd.Config()
+	if err != nil {
+		return err
+	}
+	config.GenericConfig.EnableMetrics = false
+
 	server, err := cmd.Server()
 	if err != nil {
 		return err
 	}
 
+	metricsHandler, err := mprom.MetricsHandler()
+	if err != nil {
+		return err
+	}
+	server.GenericAPIServer.Handler.NonGoRestfulMux.HandleFunc("/metrics", metricsHandler)
+
 	if err := api.Install(provider, podInformer.Lister(), informer.Core().V1().Nodes().Lister(), server.GenericAPIServer, nil); err != nil {
 		return err
 	}
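For readers skimming the diff, the sketch below restates the pattern the hunk above wires into the adapter: the apiserver's built-in metrics serving is disabled (EnableMetrics = false) and a hand-rolled /metrics handler is installed instead, serving both the legacy apiserver registry and a dedicated registry that holds the adapter's own histogram. This is a minimal, standalone illustration rather than the adapter's actual wiring; the metric names, label values, and port are made up for the example.

// Minimal sketch of the registry/handler pattern used by this PR (illustrative names).
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// Illustrative histogram; the adapter's real vector lives in pkg/client/metrics.
var requestDuration = metrics.NewHistogramVec(
	&metrics.HistogramOpts{
		Namespace: "example",
		Subsystem: "client",
		Name:      "request_duration_seconds",
		Help:      "Example request latency in seconds.",
		Buckets:   prometheus.DefBuckets,
	},
	[]string{"path", "server"},
)

func main() {
	// Register the vector on a dedicated registry instead of the global default,
	// mirroring MetricsHandler in pkg/client/metrics/metrics.go.
	registry := metrics.NewKubeRegistry()
	if err := registry.Register(requestDuration); err != nil {
		log.Fatal(err)
	}

	// Record one observation so the series shows up when the endpoint is scraped.
	requestDuration.With(prometheus.Labels{"path": "/api/v1/query", "server": "prom.example"}).Observe(0.042)

	// Serve the legacy (apiserver) registry and the custom registry on one path,
	// like the handler installed on NonGoRestfulMux in the hunk above.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
		legacyregistry.Handler().ServeHTTP(w, req)
		metrics.HandlerFor(registry, metrics.HandlerOpts{}).ServeHTTP(w, req)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}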
34 changes: 25 additions & 9 deletions pkg/client/metrics/metrics.go
@@ -18,30 +18,46 @@ package metrics
 
 import (
 	"context"
+	"net/http"
 	"net/url"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
 
+	apimetrics "k8s.io/apiserver/pkg/endpoints/metrics"
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+
 	"sigs.k8s.io/prometheus-adapter/pkg/client"
 )
 
 var (
 	// queryLatency is the total latency of any query going through the
 	// various endpoints (query, range-query, series). It includes some deserialization
 	// overhead and HTTP overhead.
-	queryLatency = prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Name:    "cmgateway_prometheus_query_latency_seconds",
-			Help:    "Prometheus client query latency in seconds. Broken down by target prometheus endpoint and target server",
-			Buckets: prometheus.ExponentialBuckets(0.0001, 2, 10),
+	queryLatency = metrics.NewHistogramVec(
+		&metrics.HistogramOpts{
+			Namespace: "prometheus_adapter",
+			Subsystem: "prometheus_client",
+			Name:      "request_duration_seconds",
+			Help:      "Prometheus client query latency in seconds. Broken down by target prometheus endpoint and target server",
+			Buckets:   prometheus.DefBuckets,
 		},
-		[]string{"endpoint", "server"},
+		[]string{"path", "server"},
 	)
 )
 
-func init() {
-	prometheus.MustRegister(queryLatency)
+func MetricsHandler() (http.HandlerFunc, error) {
+	registry := metrics.NewKubeRegistry()
+	err := registry.Register(queryLatency)
+	if err != nil {
+		return nil, err
+	}
+	apimetrics.Register()
+	return func(w http.ResponseWriter, req *http.Request) {
+		legacyregistry.Handler().ServeHTTP(w, req)
+		metrics.HandlerFor(registry, metrics.HandlerOpts{}).ServeHTTP(w, req)
+	}, nil
 }
 
 // instrumentedClient is a client.GenericAPIClient which instruments calls to Do,
@@ -63,7 +79,7 @@ func (c *instrumentedGenericClient) Do(ctx context.Context, verb, endpoint strin
 				return
 			}
 		}
-		queryLatency.With(prometheus.Labels{"endpoint": endpoint, "server": c.serverName}).Observe(endTime.Sub(startTime).Seconds())
+		queryLatency.With(prometheus.Labels{"path": endpoint, "server": c.serverName}).Observe(endTime.Sub(startTime).Seconds())
 	}()
 
 	var resp client.APIResponse
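The rename above also changes what a scrape returns: assuming the usual namespace_subsystem_name convention for fully-qualified metric names, the histogram should now appear as prometheus_adapter_prometheus_client_request_duration_seconds with a path label instead of endpoint. A rough in-package test sketch of that assumption (not part of this PR; the request path, server name, and observed value are invented) might look like:

// Sketch of an internal test in pkg/client/metrics verifying the handler exposes the histogram.
package metrics

import (
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

func TestMetricsHandlerExposesQueryLatency(t *testing.T) {
	// Build the handler first so queryLatency is registered before we observe it.
	handler, err := MetricsHandler()
	if err != nil {
		t.Fatal(err)
	}

	// Record one observation so the series is present in the scrape output.
	queryLatency.With(prometheus.Labels{"path": "/api/v1/query", "server": "prom.example"}).Observe(0.05)

	req := httptest.NewRequest("GET", "/metrics", nil)
	rec := httptest.NewRecorder()
	handler(rec, req)

	// Assumption: the fully-qualified name is namespace + "_" + subsystem + "_" + name.
	want := "prometheus_adapter_prometheus_client_request_duration_seconds"
	if !strings.Contains(rec.Body.String(), want) {
		t.Errorf("expected /metrics output to contain %q", want)
	}
}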
