Caddy healthcheck logger

Hello All,

#1 - Problem - The active health check module floods the systemd journal every 10 seconds with "host is up" entries, even though the host never goes down. How do I skip these log entries? The Caddyfile documentation is unclear on this.

#2 - System logs, redacted and trimmed; we don’t need 1000 entries to prove this is repetitive.

Sep 20 23:24:15 caddy-A caddy[2555]: {"level":"info","ts":1726889055.9601016,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussdefiant.some.tld:8006"}
Sep 20 23:24:25 caddy-A caddy[2555]: {"level":"info","ts":1726889065.9539926,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.251:80"}
Sep 20 23:24:25 caddy-A caddy[2555]: {"level":"info","ts":1726889065.9543757,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.250:80"}
Sep 20 23:24:25 caddy-A caddy[2555]: {"level":"info","ts":1726889065.9556007,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussvoyager.some.tld:8006"}
Sep 20 23:24:25 caddy-A caddy[2555]: {"level":"info","ts":1726889065.9597545,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussdefiant.some.tld:8006"}
Sep 20 23:24:35 caddy-A caddy[2555]: {"level":"info","ts":1726889075.9545627,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.251:80"}
Sep 20 23:24:35 caddy-A caddy[2555]: {"level":"info","ts":1726889075.9548547,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.250:80"}
Sep 20 23:24:35 caddy-A caddy[2555]: {"level":"info","ts":1726889075.9561517,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussvoyager.some.tld:8006"}
Sep 20 23:24:35 caddy-A caddy[2555]: {"level":"info","ts":1726889075.960286,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussdefiant.some.tld:8006"}
Sep 20 23:24:45 caddy-A caddy[2555]: {"level":"info","ts":1726889085.9538763,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.251:80"}
Sep 20 23:24:45 caddy-A caddy[2555]: {"level":"info","ts":1726889085.9541879,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.250:80"}
Sep 20 23:24:45 caddy-A caddy[2555]: {"level":"info","ts":1726889085.955155,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussvoyager.some.tld:8006"}
Sep 20 23:24:45 caddy-A caddy[2555]: {"level":"info","ts":1726889085.9598725,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussdefiant.some.tld:8006"}
Sep 20 23:24:55 caddy-A caddy[2555]: {"level":"info","ts":1726889095.9545105,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.251:80"}
Sep 20 23:24:55 caddy-A caddy[2555]: {"level":"info","ts":1726889095.9547412,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"192.168.122.250:80"}
Sep 20 23:24:55 caddy-A caddy[2555]: {"level":"info","ts":1726889095.9562342,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussvoyager.some.tld:8006"}
Sep 20 23:24:55 caddy-A caddy[2555]: {"level":"info","ts":1726889095.9602382,"logger":"http.handlers.reverse_proxy.health_checker.active","msg":"host is up","host":"ussdefiant.some.tld:8006"}

#3 - Caddy version:
v2.8.4 h1:q3pe0wpBj1OcHFZ3n/1nl4V4bxBrYoSoab7rL9BMYNk=, rebuilt with the cloudns module and a few others.
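
For reference, a custom build like this is typically produced with xcaddy. A hypothetical sketch (the exact module set is an assumption; cloudns matches the dns cloudns directive below, and transform-encoder is what provides the format transform log directive):

# Hypothetical rebuild sketch with assumed module paths
xcaddy build v2.8.4 \
        --with github.com/caddy-dns/cloudns \
        --with github.com/caddyserver/transform-encoder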

#4-a - Installed via the Proxmox VE helper scripts in an LXC container running Debian 12.

#4-b - Started via systemctl.

#4-c - systemd unit file:

root@caddy-A:~# cat /etc/systemd/system/multi-user.target.wants/caddy.service
# caddy.service
#
# For using Caddy with a config file.
#
# Make sure the ExecStart and ExecReload commands are correct
# for your installation.
#
# See https://caddyserver.com/docs/install for instructions.
#
# WARNING: This service does not use the --resume flag, so if you
# use the API to make changes, they will be overwritten by the
# Caddyfile next time the service is restarted. If you intend to
# use Caddy's API to configure it, add the --resume flag to the
# `caddy run` command or use the caddy-api.service file instead.

[Unit]
Description=Caddy
Documentation=https://caddyserver.com/docs/
After=network.target network-online.target
Requires=network-online.target

[Service]
Type=notify
User=caddy
Group=caddy
ExecStart=/usr/bin/caddy run --environ --config /etc/caddy/Caddyfile
ExecReload=/usr/bin/caddy reload --config /etc/caddy/Caddyfile --force
TimeoutStopSec=5s
LimitNOFILE=1048576
PrivateTmp=true
ProtectSystem=full
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE

[Install]
WantedBy=multi-user.target
root@caddy-A:~# 
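
Since the service writes to the systemd journal, the repetitive entries can also be hidden at read time. A minimal sketch, assuming the unit name caddy from the file above:

# Follow the caddy unit's journal, dropping the health-check noise
journalctl -u caddy -f | grep -v '"msg":"host is up"'

This only filters what you see; it does not stop the entries from being written.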

#4-d - My Caddyfile, redacted and with irrelevant sections removed. I do not have a syntax problem.

# Global

{
        email <REMOVED>
}


# Snippets

(cloudns_default) {
        tls {
                issuer acme {
                        dns cloudns {
                                auth_id "<REMOVED>"
                                #sub_auth_id "<num>"
                                auth_password "<REMOVED>"
                        }
                        propagation_delay 120s
                        resolvers 185.136.96.79 1.1.1.1
                }
        }

        encode zstd gzip
        log {
                hostnames {args[0]}
                output file /var/log/caddy/{args[0]}.log
                format transform "{common_log}"
        }
}
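
For context, each site imports this snippet with its own hostname as the argument, so {args[0]} fills in both the log hostnames filter and the file path. A hypothetical site would use it like this:

# Hypothetical example: {args[0]} becomes example.some.tld, so its access
# log lands in /var/log/caddy/example.some.tld.log
example.some.tld {
        import cloudns_default example.some.tld
        respond "OK"
}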


# default site

:80 {
        root * /usr/share/caddy
        file_server
}



# define sites

<REMOVED>

proxmox.some.tld {
        import cloudns_default proxmox.some.tld
        reverse_proxy * {
                to ussvoyager.some.tld:8006
                to ussdefiant.some.tld:8006

                lb_policy ip_hash     # Makes backend sticky based on client ip
                lb_try_duration 1s
                lb_try_interval 250ms

                health_uri /          # Backend health check path
                # health_port 80      # Default same as backend port
                health_interval 10s
                health_timeout 2s
                health_status 200

                transport http {
                        tls_insecure_skip_verify
                }
        }
}
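
For comparison, passive health checks would sidestep the periodic probing (and its logging) altogether, at the cost of only detecting failures when real requests fail. A sketch with illustrative values:

        reverse_proxy * {
                to ussvoyager.some.tld:8006
                to ussdefiant.some.tld:8006

                lb_policy ip_hash

                # Passive checks: no probe every 10s, so nothing to log;
                # a backend is marked down for 30s after 3 failed requests
                fail_duration 30s
                max_fails 3

                transport http {
                        tls_insecure_skip_verify
                }
        }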

Please fill out the help topic template as per the forum rules.

Please mind your post’s formatting: use code blocks for config and logs, not quotes. Post your entire config, and post more of your logs.


Ah, sorry, this is a bug that got fixed on master but hasn’t been released yet.

You can build from master for the time being if you want the fix right away.
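
Until that release lands, a possible stopgap is to drop the health checker’s logger via the include/exclude filters in the global log options. A sketch; note this also hides genuine up/down transitions from that logger:

{
        log {
                # Silence everything from the active health checker,
                # including the repetitive "host is up" entries
                exclude http.handlers.reverse_proxy.health_checker.active
        }
}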
