V2 Prometheus metrics port from v1

Hi,

I’m having a look at porting miekg/caddy-prometheus (Prometheus metrics middleware for Caddy) to Caddy v2.

Caddy v1 emitted an “on start” event that the plugin hooked into (see caddy-prometheus/setup.go on the master branch of miekg/caddy-prometheus).

What would you recommend as an approach to replicate similar behaviour?


Awesome! Let’s see…

V2 doesn’t (yet?) have events like Caddy 1 did (they weren’t super useful in v1 anyway, not the way they were implemented, I think).

Instead, v2 has the module lifecycle (see the “Extending Caddy” page in the Caddy documentation).

Keep in mind that configs and their modules in v2 get loaded and unloaded frequently, potentially with multiple configs overlapping during a reload, so if you need to keep global state across module lifetimes, be aware of that. (I can provide more tips if you have a specific use case / question later on.)

So instead of “on start” you probably want Provision() – but it really depends on what kind of module you’re writing. Is it an HTTP handler? Or an app? Or something else?

Keep in mind that Provision() runs even if a config is just being validated and not fully run, so don’t use Provision to start any long-running things or do any expensive tasks.
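
To make that concrete, here’s a rough sketch of the skeleton for an HTTP handler module – the names are just placeholders, and the package-level sync.Once is one way to guard process-wide setup across config reloads:

    package prometheus

    import (
    	"net/http"
    	"sync"

    	"github.com/caddyserver/caddy/v2"
    	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
    )

    func init() {
    	caddy.RegisterModule(Metrics{})
    }

    // Metrics is a placeholder handler module.
    type Metrics struct{}

    // CaddyModule returns the Caddy module information.
    func (Metrics) CaddyModule() caddy.ModuleInfo {
    	return caddy.ModuleInfo{
    		ID:  "http.handlers.prometheus",
    		New: func() caddy.Module { return new(Metrics) },
    	}
    }

    // setupOnce guards process-wide setup, since Provision runs for every
    // config load (including validation-only loads).
    var setupOnce sync.Once

    // Provision sets the module up; keep it cheap.
    func (m *Metrics) Provision(ctx caddy.Context) error {
    	setupOnce.Do(func() {
    		// one-time, process-wide setup (e.g. registering collectors) goes here
    	})
    	return nil
    }

    // ServeHTTP implements caddyhttp.MiddlewareHandler.
    func (m *Metrics) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
    	// instrument the request here, then hand off to the next handler
    	return next.ServeHTTP(w, r)
    }

If you do need to tear anything down when a config gets unloaded, the CleanerUpper interface (Cleanup()) is the counterpart to Provision().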

Thanks a lot for your answer, I’ll experiment with it and come back to you!


@nchagrass How’s it going with porting the Prometheus extension to Caddy v2? I’d love to upgrade, but I really want to keep having metrics :nerd_face:.

@n1try I’ve made very basic progress but I actually stripped out some features

I’m not sure where to take this next (my Golang skills ain’t great)

This works but is not 100% representative of the available features in v1

# handler.go
    package prometheus

    import (
    	"log"
    	"net/http"
    	"os"
    	"strconv"
    	"time"

    	"github.com/caddyserver/caddy/v2"
    	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
    	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func init() {
    	caddy.RegisterModule(Metrics{})
    	httpcaddyfile.RegisterHandlerDirective("prometheus", parseCaddyfile)
    }

    func (Metrics) CaddyModule() caddy.ModuleInfo {
    	return caddy.ModuleInfo{
    		ID:  "http.handlers.prometheus",
    		New: func() caddy.Module { return NewMetrics() },
    	}
    }

    func (m *Metrics) Provision(ctx caddy.Context) error {
    	m.handler = promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
    		ErrorHandling: promhttp.HTTPErrorOnError,
    		ErrorLog:      log.New(os.Stderr, "", log.LstdFlags),
    	})

    	once.Do(func() {
    		m.start()
    	})

    	return nil
    }

    // ServeHTTP implements caddyhttp.MiddlewareHandler.
    func (m Metrics) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {

    	requestURI := r.RequestURI
    	statusStr := "0"
    	var statusInt int

    	start := time.Now()

    	rw := caddyhttp.NewResponseRecorder(w, nil, func(status int, header http.Header) bool {
    		statusInt = status
    		return false
    	})

    	err := next.ServeHTTP(rw, r)
    	if err == nil {
    		rw.WriteResponse()
    		statusStr = strconv.Itoa(rw.Status())
    	} else {
    		if handlerErr, ok := err.(caddyhttp.HandlerError); ok {
    			statusInt = handlerErr.StatusCode
    		}
    		statusStr = strconv.Itoa(statusInt)
    	}

    	d := time.Since(start)

    	labels := []string{requestURI, r.Method, statusStr}
    	requestCount.WithLabelValues(labels...).Inc()
    	responseLatency.WithLabelValues(labels...).Observe(d.Seconds())
    	requestDuration.WithLabelValues(labels...).Observe(d.Seconds())
    	responseSize.WithLabelValues(labels...).Observe(float64(rw.Size()))
    	responseStatus.WithLabelValues(labels...).Inc()

    	return err
    }

    func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
    	// No Caddyfile options are parsed yet; just return a handler with defaults.
    	// (The old "only one metrics module per server" check was a no-op here,
    	// since it tested a freshly declared local variable that is always nil.)
    	return NewMetrics(), nil
    }

    // Interface guards
    var (
    	_ caddy.Provisioner           = (*Metrics)(nil)
    	_ caddyhttp.MiddlewareHandler = (*Metrics)(nil)
    )


# metrics.go
package prometheus

import (
	"github.com/prometheus/client_golang/prometheus"
)

const namespace = "caddy"

var (
	requestCount    *prometheus.CounterVec
	requestDuration *prometheus.HistogramVec
	responseSize    *prometheus.HistogramVec
	responseStatus  *prometheus.CounterVec
	responseLatency *prometheus.HistogramVec
)

func (m *Metrics) define(subsystem string) {
	if subsystem == "" {
		subsystem = "http"
	}
	if m.latencyBuckets == nil {
		m.latencyBuckets = append(prometheus.DefBuckets, 15, 20, 30, 60, 120, 180, 240, 480, 960)
	}
	if m.sizeBuckets == nil {
		m.sizeBuckets = []float64{0, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000, 1e5, 5e5, 1e6, 2e6, 3e6, 4e6, 5e6, 10e6}
	}

	labels := []string{"handler", "method", "status_code"}

	requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "request_count_total",
		Help:      "Counter of HTTP(S) requests made.",
	}, labels)

	requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "request_duration_seconds",
		Help:      "Histogram of the time (in seconds) each request took.",
		Buckets:   m.latencyBuckets,
	}, labels)

	responseSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "response_size_bytes",
		Help:      "Size of the returned response in bytes.",
		Buckets:   m.sizeBuckets,
	}, labels)

	responseStatus = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "response_status_count_total",
		Help:      "Counter of response status codes.",
	}, labels)

	responseLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      "response_latency_seconds",
		Help:      "Histogram of the time (in seconds) until the first write for each request.",
		Buckets:   m.latencyBuckets,
	}, labels)
}


# setup.go
package prometheus

import (
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/prometheus/client_golang/prometheus"
	"log"
	"net/http"
	"sync"
)

const (
	defaultPath = "/metrics"
	defaultAddr = "localhost:9180"
)

var once sync.Once

// Metrics holds the prometheus configuration.
type Metrics struct {
	next           caddyhttp.Handler
	addr           string // the address we listen on
	useCaddyAddr   bool
	hostname       string
	path           string
	latencyBuckets []float64
	sizeBuckets    []float64
	// subsystem?
	once sync.Once

	handler http.Handler
}

func NewMetrics() *Metrics {
	return &Metrics{
		path: defaultPath,
		addr: defaultAddr,
	}
}

func (m *Metrics) start() error {
	m.once.Do(func() {
		m.define("")

		prometheus.MustRegister(requestCount)
		prometheus.MustRegister(requestDuration)
		prometheus.MustRegister(responseLatency)
		prometheus.MustRegister(responseSize)
		prometheus.MustRegister(responseStatus)

		if !m.useCaddyAddr {
			http.Handle(m.path, m.handler)
			go func() {
				err := http.ListenAndServe(m.addr, nil)
				if err != nil {
					log.Printf("[ERROR] Starting handler: %v", err)
				}
			}()
		}
	})
	return nil
}
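
For reference, a Caddyfile along these lines should be enough to exercise the handler (the site address is just an example; the order global option is needed because prometheus isn’t one of the standard directives):

    {
        order prometheus before file_server
    }

    example.com {
        prometheus
        file_server
    }

With the defaults in setup.go, the metrics themselves are then served separately on localhost:9180/metrics.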

That looks pretty good!

Do you have a repo published yet?

Got anything specific you need help with?

I’m not in a situation where I can support/maintain the work at the moment; I’d rather have someone else take it over

I’m going to have a look at it too - will see what it’ll take to bring it up to par with the v1 module :slight_smile:

Thanks to @nchagrass for getting this started!

I’ve just published hairyhenderson/caddyprom (since deprecated in favour of the core metrics shipped in Caddy v2.2.0-rc.2+), which is mostly the same in functionality as the v1 module. I’ve also been looking at a more general approach that will be able to track many more metrics, not just HTTP. But for now, this is a start!


It looks much better, nice work Dave!

