1. The problem I’m having:
I am trying to run cloudflared and Caddy from the same Docker Compose file. I have a static site that I am currently setting up on a Raspberry Pi, but I will eventually migrate it to an x86 mini PC.
I was initially getting a bad gateway error. I tweaked some settings (I don't remember which ones), and now the https page shows an SSL error warning but lets me continue to the http page, where I get a "too many redirects" error.
2. Error messages and/or full log output:
caddy | {"level":"info","ts":1741018195.1491358,"logger":"admin","msg":"admin endpoint started","address":"localhost:2019","enforce_origin":false,"origins":["//localhost:2019","//[::1]:2019","//127.0.0.1:2019"]}
caddy | {"level":"info","ts":1741018195.1500506,"logger":"http.auto_https","msg":"server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS","server_name":"srv0","https_port":443}
caddy | {"level":"info","ts":1741018195.1501296,"logger":"http.auto_https","msg":"enabling automatic HTTP->HTTPS redirects","server_name":"srv0"}
caddy | {"level":"debug","ts":1741018195.150217,"logger":"http.auto_https","msg":"adjusted config","tls":{"automation":{"policies":[{"subjects":["*.mtonc.dev","mtonc.dev"]},{}]}},"http":{"servers":{"remaining_auto_https_redirects":{"listen":[":80"],"routes":[{},{}]},"srv0":{"listen":[":443"],"routes":[{"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"headers","response":{"set":{"Permissions-Policy":["interest-cohort=()"],"Strict-Transport-Security":["max-age=31536000"],"X-Content-Type-Options":["nosniff"],"X-Frame-Options":["DENY"]}}}]},{"group":"group4","handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"static_response","headers":{"Location":["https://mtonc.dev"]},"status_code":302}]}]}],"match":[{"protocol":"http"}]},{"group":"group4","handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"static_response","headers":{"Location":["https://mtonc.dev"]},"status_code":302}]}]}],"match":[{"host":["www.mtonc.dev"]}]},{"group":"group4","handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"vars","root":"/usr/share/caddy"}]},{"handle":[{"handler":"rewrite","uri":"{http.matchers.file.relative}"}],"match":[{"file":{"try_files":["{http.request.uri.path}","/index.html"]}}]},{"handle":[{"encodings":{"gzip":{},"zstd":{}},"handler":"encode","prefer":["zstd","gzip"]},{"handler":"file_server","hide":["/etc/caddy/Caddyfile"]}]}]}],"match":[{"host":["https://mtonc.dev"]}]},{"group":"group4","handle":[{"handler":"subroute","routes":[{"handle":[{"abort":true,"handler":"static_response"}]}]}]}]}],"terminal":true}],"tls_connection_policies":[{}],"automatic_https":{}}}}}
caddy | {"level":"info","ts":1741018195.1518097,"logger":"tls.cache.maintenance","msg":"started background certificate maintenance","cache":"0x4000576000"}
caddy | {"level":"debug","ts":1741018195.1545935,"logger":"http","msg":"starting server loop","address":"[::]:80","tls":false,"http3":false}
caddy | {"level":"warn","ts":1741018195.1546686,"logger":"http","msg":"HTTP/2 skipped because it requires TLS","network":"tcp","addr":":80"}
caddy | {"level":"warn","ts":1741018195.1546807,"logger":"http","msg":"HTTP/3 skipped because it requires TLS","network":"tcp","addr":":80"}
caddy | {"level":"info","ts":1741018195.1546893,"logger":"http.log","msg":"server running","name":"remaining_auto_https_redirects","protocols":["h1","h2","h3"]}
caddy | {"level":"debug","ts":1741018195.1548142,"logger":"http","msg":"starting server loop","address":"[::]:443","tls":true,"http3":false}
caddy | {"level":"info","ts":1741018195.1548405,"logger":"http","msg":"enabling HTTP/3 listener","addr":":443"}
caddy | {"level":"info","ts":1741018195.1550536,"msg":"failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 7168 kiB, got: 416 kiB). See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details."}
caddy | {"level":"info","ts":1741018195.155522,"logger":"http.log","msg":"server running","name":"srv0","protocols":["h1","h2","h3"]}
caddy | {"level":"info","ts":1741018195.1555755,"logger":"http","msg":"enabling automatic TLS certificate management","domains":["mtonc.dev","*.mtonc.dev"]}
caddy | {"level":"debug","ts":1741018195.157244,"logger":"tls.cache","msg":"added certificate to cache","subjects":["mtonc.dev"],"expiration":1748786516,"managed":true,"issuer_key":"acme-v02.api.letsencrypt.org-directory","hash":"e2ce65b65e1a6a1350ec35788b32290692c2e15713445ca2134bce3bbd46fc82","cache_size":1,"cache_capacity":10000}
caddy | {"level":"debug","ts":1741018195.157348,"logger":"events","msg":"event","name":"cached_managed_cert","id":"2c1217a1-27f4-4c47-afe8-7f4013bb02b7","origin":"tls","data":{"sans":["mtonc.dev"]}}
caddy | {"level":"debug","ts":1741018195.1588726,"logger":"tls.cache","msg":"added certificate to cache","subjects":["*.mtonc.dev"],"expiration":1748752157,"managed":true,"issuer_key":"acme-v02.api.letsencrypt.org-directory","hash":"3c74354216eccb1dd1e938894f1e9406b52ba57de29a61dc46584e1038844cfc","cache_size":2,"cache_capacity":10000}
caddy | {"level":"debug","ts":1741018195.1590037,"logger":"events","msg":"event","name":"cached_managed_cert","id":"4ec45446-fac1-4d59-a2c5-95bf81593ad2","origin":"tls","data":{"sans":["*.mtonc.dev"]}}
caddy | {"level":"info","ts":1741018195.1669068,"msg":"autosaved config (load with --resume flag)","file":"/config/caddy/autosave.json"}
caddy | {"level":"info","ts":1741018195.1669767,"msg":"serving initial configuration"}
caddy | {"level":"info","ts":1741018195.1683507,"logger":"tls","msg":"storage cleaning happened too recently; skipping for now","storage":"FileStorage:/data/caddy","instance":"316afe54-164e-472b-ae20-41dc5c589a65","try_again":1741104595.1683447,"try_again_in":86399.999997574}
caddy | {"level":"info","ts":1741018195.1688,"logger":"tls","msg":"finished cleaning storage units"}
cloudflared | 2025-03-03T16:09:56Z INF Starting tunnel tunnelID=156c7584-80e0-46b0-a680-204869aca3df
cloudflared | 2025-03-03T16:09:56Z INF Version 2025.2.0 (Checksum 74342f810af90b7773e8419a3bbd8cd28f1b5e80255aa4a5afac2d02545a0e6c)
cloudflared | 2025-03-03T16:09:56Z INF GOOS: linux, GOVersion: go1.22.5-devel-cf, GoArch: arm64
cloudflared | 2025-03-03T16:09:56Z INF Settings: map[no-autoupdate:true token:*****]
cloudflared | 2025-03-03T16:09:56Z INF Generated Connector ID: 3e294bd2-77a9-443d-af0d-9422b925646a
cloudflared | 2025-03-03T16:09:56Z INF Initial protocol quic
cloudflared | 2025-03-03T16:09:56Z INF ICMP proxy will use 172.21.0.2 as source for IPv4
cloudflared | 2025-03-03T16:09:56Z INF ICMP proxy will use ::1 in zone lo as source for IPv6
cloudflared | 2025-03-03T16:09:56Z INF ICMP proxy will use 172.21.0.2 as source for IPv4
cloudflared | 2025-03-03T16:09:56Z INF ICMP proxy will use ::1 in zone lo as source for IPv6
cloudflared | 2025-03-03T16:09:56Z INF Starting metrics server on [::]:20241/metrics
cloudflared | 2025-03-03T16:09:56Z INF Using [CurveID(4588) CurveID(25497) CurveP256] as curve preferences connIndex=0 event=0 ip=198.41.192.47
cloudflared | 2025/03/03 16:09:56 failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 7168 kiB, got: 416 kiB). See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details.
cloudflared | 2025-03-03T16:09:56Z INF Registered tunnel connection connIndex=0 connection=e99299be-0229-4fe1-b223-fde3e5d1543f event=0 ip=198.41.192.47 location=ord10 protocol=quic
cloudflared | 2025-03-03T16:09:56Z INF Using [CurveID(4588) CurveID(25497) CurveP256] as curve preferences connIndex=1 event=0 ip=198.41.200.73
cloudflared | 2025-03-03T16:09:56Z INF Updated to new configuration config="{\"ingress\":[{\"hostname\":\"mtonc.dev\", \"originRequest\":{\"http2Origin\":true, \"httpHostHeader\":\"mtonc.dev\", \"noTLSVerify\":true, \"originServerName\":\"mtonc.dev\"}, \"service\":\"https://caddy\"}, {\"hostname\":\"*.mtonc.dev\", \"originRequest\":{\"http2Origin\":true, \"httpHostHeader\":\"mtonc.dev\", \"noTLSVerify\":true, \"originServerName\":\"mtonc.dev\"}, \"service\":\"https://caddy\"}, {\"service\":\"http_status:404\"}], \"warp-routing\":{\"enabled\":false}}" version=19
cloudflared | 2025-03-03T16:09:56Z INF Registered tunnel connection connIndex=1 connection=8448589f-e27d-41dc-b275-1e1a0576d1ef event=0 ip=198.41.200.73 location=ord11 protocol=quic
cloudflared | 2025-03-03T16:09:57Z INF Using [CurveID(4588) CurveID(25497) CurveP256] as curve preferences connIndex=2 event=0 ip=198.41.192.57
cloudflared | 2025-03-03T16:09:57Z INF Registered tunnel connection connIndex=2 connection=59875bc5-4828-4305-97f3-c13df281a911 event=0 ip=198.41.192.57 location=ord10 protocol=quic
cloudflared | 2025-03-03T16:09:58Z INF Using [CurveID(4588) CurveID(25497) CurveP256] as curve preferences connIndex=3 event=0 ip=198.41.200.233
cloudflared | 2025-03-03T16:09:58Z INF Registered tunnel connection connIndex=3 connection=d0ca5888-b86b-424b-bd28-c5e832d28453 event=0 ip=198.41.200.233 location=ord08 protocol=quic
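(Aside: the "failed to sufficiently increase receive buffer size" message appears in both the caddy and cloudflared logs. As far as I can tell from the linked quic-go wiki it is only a QUIC performance warning, and the fix is raising the UDP buffer limits on the Docker host, not in the containers; the values below are my reading of that wiki, so treat them as a sketch. I don't believe it is related to the redirect problem.)

# On the Raspberry Pi host itself (values taken from the quic-go wiki; adjust as needed)
sudo sysctl -w net.core.rmem_max=7500000
sudo sysctl -w net.core.wmem_max=7500000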
3. Caddy version:
Docker image caddy:2.9.1-alpine, rebuilt with xcaddy and the caddy-dns/cloudflare module (see the Dockerfile below)
4. How I installed and ran Caddy:
Docker Compose
a. System environment:
Docker is running on a headless Raspberry Pi running Raspberry Pi OS. I am connecting to it over SSH from an old MacBook Air running Pop!_OS. Both machines are on the same network/Wi-Fi.
b. Command:
docker compose up --build

Dockerfile:
# Copy over package declarations
FROM node:lts AS base
WORKDIR /app
COPY package*.json ./
# install packages
FROM base AS build-deps
RUN npm install
# Copy packages and resources and build the site
FROM base AS astro_build
COPY --from=build-deps /app/node_modules /app/node_modules
COPY . .
RUN npm run build
# Build caddy with dns connector
FROM caddy:2.9.1-builder-alpine AS builder
RUN xcaddy build \
--with github.com/caddy-dns/cloudflare
# Copy caddy from builder, and site from astro build
FROM caddy:2.9.1-alpine
# install unmet dependency
RUN apk add nss-tools
# Copy caddy binary
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
# Clear out folder
RUN rm -rf /usr/share/caddy/*
# Copy built site to clean folder
COPY --from=astro_build /app/dist /usr/share/caddy
# Copy Caddyfile
COPY ./Caddyfile /etc/caddy/Caddyfile
# Format the Caddyfile
RUN cd /etc/caddy && caddy fmt --overwrite
# Run caddy as daemon with custom config location
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile"]
c. Service/unit/compose file:
services:
  caddy:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: caddy
    ports:
      - "80:80"
      - "443:443"
      - "443:443/udp"
    volumes:
      - caddy_data:/data
      - caddy_config:/config
    restart: unless-stopped
    networks:
      - caddy_net
      - cloudflare
    env_file:
      - .env

  cloudflared:
    container_name: cloudflared
    image: cloudflare/cloudflared
    restart: unless-stopped
    command: tunnel --no-autoupdate run --token ${TUNNEL_TOKEN}
    depends_on:
      - caddy
    networks:
      - cloudflare

volumes:
  caddy_data:
  caddy_config:

networks:
  caddy_net:
    name: caddy_net
    attachable: true
  cloudflare:
    name: cloudflare
    attachable: true
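Since the tunnel points at https://caddy, cloudflared has to resolve that name over the shared cloudflare network. To confirm both containers are attached to it, something like this should list them both:

docker network inspect cloudflare --format '{{range .Containers}}{{.Name}} {{end}}'
# expected output (order may vary): caddy cloudflared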
d. My complete Caddy config:
{
	debug
	email my-email (redacted)
}

(www_https_redir) {
	redir https://mtonc.dev
}

mtonc.dev *.mtonc.dev {
	tls {
		dns cloudflare {
			zone_token {env.CF_ZONE_ACCOUNT_TOKEN}
			api_token {env.CF_API_ACCOUNT_TOKEN}
		}
		resolvers 1.1.1.1
	}

	header {
		# disable FLoC tracking
		Permissions-Policy interest-cohort=()
		# enable HSTS
		Strict-Transport-Security max-age=31536000
		# disable clients from sniffing the media type
		X-Content-Type-Options nosniff
		# clickjacking protection
		X-Frame-Options DENY
	}

	@www host www.mtonc.dev
	@site host https://mtonc.dev
	@sub host *.mtonc.dev

	handle @www {
		import www_https_redir
	}

	handle @site {
		file_server
		root * /usr/share/caddy
		try_files {path} /index.html
		encode zstd gzip
	}

	handle {
		abort
	}
}
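If it helps, I can also sanity-check what Caddy parses from this file inside the running container (just a sketch; the DNS credentials come from the env_file):

docker compose exec caddy caddy validate --config /etc/caddy/Caddyfile
docker compose exec caddy caddy adapt --config /etc/caddy/Caddyfile --pretty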
5. Links to relevant resources:
Cloudflare tunnel public hostnames, as configured in the dashboard (a roughly equivalent config.yml sketch follows the list):

host: mtonc.dev
  service: https://caddy:443
  NoTLSVerify: true
  Host Header: mtonc.dev
  OriginServerName: mtonc.dev

host: *.mtonc.dev
  service: https://caddy:443
  NoTLSVerify: true
  Host Header: mtonc.dev
  OriginServerName: mtonc.dev
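The tunnel is dashboard-managed (token-based), so there is no local config.yml, but for reference the hostnames above correspond to roughly the following ingress rules, reconstructed from the "Updated to new configuration" line in the cloudflared log (the file itself is hypothetical):

ingress:
  - hostname: mtonc.dev
    service: https://caddy
    originRequest:
      http2Origin: true
      httpHostHeader: mtonc.dev
      noTLSVerify: true
      originServerName: mtonc.dev
  - hostname: "*.mtonc.dev"
    service: https://caddy
    originRequest:
      http2Origin: true
      httpHostHeader: mtonc.dev
      noTLSVerify: true
      originServerName: mtonc.dev
  - service: http_status:404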
6. What I tried:
I have tried messing around with the networks in the Docker Compose file, with no results. I have also tried changing the service inside the Cloudflare tunnel config via the dashboard; for that I have tried https://caddy:443, https://localhost:443, and https://mtonc.dev. I'm not exactly sure what to put for the service, as I have seen it done multiple ways.
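One test I could still run to narrow things down is hitting Caddy directly over the LAN, bypassing cloudflared entirely (ports 80 and 443 are published on the Pi). Roughly, with 192.168.x.x as a placeholder for the Pi's address:

curl -vk --resolve mtonc.dev:443:192.168.x.x https://mtonc.dev/
curl -v --resolve mtonc.dev:80:192.168.x.x http://mtonc.dev/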