1. The problem I’m having:
I am trying to use Caddy + Tailscale in Docker as a reverse proxy to each service in my Docker Compose stack.
Visiting a URL such as torrents.some-domain.ts.net only works intermittently; sometimes it starts working again after a Docker restart.
A few months ago, a very similar configuration was working flawlessly.
Each container does show up as logged in on the Tailscale admin panel.
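A quick way to confirm from another device on the tailnet whether the per-service nodes are actually online, and not just listed in the admin panel (a minimal check; the hostnames match the site blocks in my Caddyfile below):

# run on any other tailnet device
tailscale status | grep -E 'caddy|sonarr|torrents'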
2. Error messages and/or full log output:
Caddy
[
{
"level": "info",
"ts": 1734195578.475382,
"msg": "using config from file",
"file": "/etc/caddy/Caddyfile"
},
{
"level": "info",
"ts": 1734195578.476168,
"msg": "adapted config to JSON",
"adapter": "caddyfile"
},
{
"level": "warn",
"ts": 1734195578.4761744,
"msg": "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
"adapter": "caddyfile",
"file": "/etc/caddy/Caddyfile",
"line": 33
},
{
"level": "info",
"ts": 1734195578.4767532,
"logger": "admin",
"msg": "admin endpoint started",
"address": "localhost:2019",
"enforce_origin": false,
"origins": ["//localhost:2019", "//[::1]:2019", "//127.0.0.1:2019"]
},
{
"level": "info",
"ts": 1734195578.4769137,
"logger": "http.auto_https",
"msg": "server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
"server_name": "srv0",
"https_port": 443
},
{
"level": "info",
"ts": 1734195578.4769201,
"logger": "http.auto_https",
"msg": "enabling automatic HTTP->HTTPS redirects",
"server_name": "srv0"
},
{
"level": "info",
"ts": 1734195578.4769695,
"logger": "tls.cache.maintenance",
"msg": "started background certificate maintenance",
"cache": "0xc00077e800"
},
{
"level": "info",
"ts": 1734195578.4769802,
"logger": "http.auto_https",
"msg": "server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
"server_name": "srv1",
"https_port": 443
},
{
"level": "info",
"ts": 1734195578.4769986,
"logger": "http.auto_https",
"msg": "enabling automatic HTTP->HTTPS redirects",
"server_name": "srv1"
},
{
"level": "info",
"ts": 1734195578.4770079,
"logger": "http.auto_https",
"msg": "server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
"server_name": "srv2",
"https_port": 443
},
{
"level": "info",
"ts": 1734195578.4770172,
"logger": "http.auto_https",
"msg": "enabling automatic HTTP->HTTPS redirects",
"server_name": "srv2"
},
{
"level": "info",
"ts": 1734195578.481587,
"logger": "tailscale",
"msg": "tsnet running state path /config/tsnet-caddy-caddy/tailscaled.state"
},
{
"level": "info",
"ts": 1734195578.4823794,
"logger": "tls",
"msg": "cleaning storage unit",
"storage": "FileStorage:/data/caddy"
},
{
"level": "info",
"ts": 1734195578.4826326,
"logger": "tls",
"msg": "finished cleaning storage units"
},
{
"level": "info",
"ts": 1734195578.4858022,
"logger": "tailscale",
"msg": "tsnet starting with hostname \"caddy\", varRoot \"/config/tsnet-caddy-caddy\""
},
{
"level": "info",
"ts": 1734195579.489294,
"logger": "tailscale",
"msg": "LocalBackend state is NeedsLogin; running StartLoginInteractive..."
},
{
"level": "info",
"ts": 1734195579.4893365,
"logger": "http",
"msg": "enabling HTTP/3 listener",
"addr": "caddy:443"
},
{
"level": "info",
"ts": 1734195580.5477824,
"msg": "connection doesn't allow setting of receive buffer size. Not a *net.UDPConn?. See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details."
},
{
"level": "info",
"ts": 1734195580.5478969,
"logger": "http.log",
"msg": "server running",
"name": "srv0",
"protocols": ["h1", "h2", "h3"]
},
{
"level": "info",
"ts": 1734195580.5517097,
"logger": "tailscale",
"msg": "tsnet running state path /config/tsnet-caddy-sonarr/tailscaled.state"
},
{
"level": "info",
"ts": 1734195580.5532124,
"logger": "tailscale",
"msg": "tsnet starting with hostname \"sonarr\", varRoot \"/config/tsnet-caddy-sonarr\""
},
{
"level": "info",
"ts": 1734195581.5551758,
"logger": "tailscale",
"msg": "LocalBackend state is NeedsLogin; running StartLoginInteractive..."
},
{
"level": "info",
"ts": 1734195581.5552301,
"logger": "http",
"msg": "enabling HTTP/3 listener",
"addr": "sonarr:443"
},
{
"level": "info",
"ts": 1734195582.2373395,
"logger": "http.log",
"msg": "server running",
"name": "srv1",
"protocols": ["h1", "h2", "h3"]
},
{
"level": "info",
"ts": 1734195582.2420764,
"logger": "tailscale",
"msg": "tsnet running state path /config/tsnet-caddy-torrents/tailscaled.state"
},
{
"level": "info",
"ts": 1734195582.2440996,
"logger": "tailscale",
"msg": "tsnet starting with hostname \"torrents\", varRoot \"/config/tsnet-caddy-torrents\""
},
{
"level": "info",
"ts": 1734195583.2477844,
"logger": "tailscale",
"msg": "LocalBackend state is NeedsLogin; running StartLoginInteractive..."
},
{
"level": "info",
"ts": 1734195583.247807,
"logger": "http",
"msg": "enabling HTTP/3 listener",
"addr": "torrents:443"
},
{
"level": "info",
"ts": 1734195584.4903367,
"logger": "tailscale",
"msg": "AuthLoop: state is Running; done"
},
{
"level": "info",
"ts": 1734195584.5401819,
"logger": "http.log",
"msg": "server running",
"name": "srv2",
"protocols": ["h1", "h2", "h3"]
},
{
"level": "info",
"ts": 1734195584.5402315,
"logger": "http.log",
"msg": "server running",
"name": "remaining_auto_https_redirects",
"protocols": ["h1", "h2", "h3"]
},
{
"level": "info",
"ts": 1734195584.5415654,
"msg": "autosaved config (load with --resume flag)",
"file": "/config/caddy/autosave.json"
},
{
"level": "info",
"ts": 1734195584.5415783,
"msg": "serving initial configuration"
},
{
"level": "info",
"ts": 1734195586.5558171,
"logger": "tailscale",
"msg": "AuthLoop: state is Running; done"
},
{
"level": "info",
"ts": 1734195588.2484698,
"logger": "tailscale",
"msg": "AuthLoop: state is Running; done"
},
{
"level": "error",
"ts": 1734195651.6931798,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "55614",
"sni": "torrents.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195654.246506,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "53744",
"sni": "sonarr.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195711.6958892,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "53864",
"sni": "torrents.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195714.2475407,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "34428",
"sni": "sonarr.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195771.6980417,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "38246",
"sni": "torrents.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195774.2495153,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "34414",
"sni": "sonarr.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195831.7010493,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "53906",
"sni": "torrents.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195834.2517724,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "52142",
"sni": "sonarr.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/sonarr.some-domain.ts.net?type=pair\": context deadline exceeded"
},
{
"level": "error",
"ts": 1734195891.7043295,
"logger": "tls.handshake",
"msg": "external certificate manager",
"remote_ip": "100.69.246.22",
"remote_port": "50830",
"sni": "torrents.some-domain.ts.net",
"cert_manager": "*caddytls.Tailscale",
"cert_manager_idx": 0,
"error": "Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": Get \"http://local-tailscaled.sock/localapi/v0/cert/torrents.some-domain.ts.net?type=pair\": context deadline exceeded"
}
]
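The repeated handshake failures above are the Tailscale certificate manager timing out against a tailscaled LocalAPI endpoint. A few hand checks that exercise roughly the same path (assuming the relevant socket is the sidecar one shared via ./services/tailscale/tmp, as in the compose file below):

# does the sidecar's LocalAPI respond at all?
docker exec tailscale tailscale --socket=/tmp/tailscaled.sock status

# can the tailnet issue HTTPS certs at all? (uses the sidecar's own hostname; writes the cert/key pair to the current directory)
docker exec tailscale tailscale --socket=/tmp/tailscaled.sock cert download-box.some-domain.ts.net

# is the shared socket visible from the Caddy container?
docker exec caddy ls -l /var/run/tailscale/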
Tailscale
boot: 2024/12/14 16:59:38 Starting tailscaled
boot: 2024/12/14 16:59:38 Waiting for tailscaled socket
2024/12/14 16:59:38 logtail started
2024/12/14 16:59:38 Program starting: v1.72.1-tf4a95663c, Go 1.22.5: []string{"tailscaled", "--socket=/tmp/tailscaled.sock", "--statedir=/var/lib/tailscale"}
2024/12/14 16:59:38 LogID: 7e20b8b6b181ac13d598f8efa6469a28b7b583adb428c3bc6b418c14782c3a1d
2024/12/14 16:59:38 logpolicy: using system state directory "/var/lib/tailscale"
logpolicy.ConfigFromFile /var/lib/tailscale/tailscaled.log.conf: open /var/lib/tailscale/tailscaled.log.conf: no such file or directory
logpolicy.Config.Validate for /var/lib/tailscale/tailscaled.log.conf: config is nil
2024/12/14 16:59:38 dns: [rc=unknown ret=direct]
2024/12/14 16:59:38 dns: using "direct" mode
2024/12/14 16:59:38 dns: using *dns.directManager
2024/12/14 16:59:38 wgengine.NewUserspaceEngine(tun "tailscale0") ...
2024/12/14 16:59:38 dns: [rc=unknown ret=direct]
2024/12/14 16:59:38 dns: using "direct" mode
2024/12/14 16:59:38 dns: using *dns.directManager
2024/12/14 16:59:38 link state: interfaces.State{defaultRoute=eth0 ifs={eth0:[172.20.0.204/16]} v4=true v6=false}
2024/12/14 16:59:38 onPortUpdate(port=37033, network=udp6)
2024/12/14 16:59:38 router: using firewall mode pref
2024/12/14 16:59:38 router: default choosing iptables
2024/12/14 16:59:38 router: netfilter running in iptables mode v6 = true, v6filter = true, v6nat = true
2024/12/14 16:59:38 onPortUpdate(port=60382, network=udp4)
2024/12/14 16:59:38 magicsock: disco key = d:b40c6447d7f35f43
2024/12/14 16:59:38 Creating WireGuard device...
2024/12/14 16:59:38 Bringing WireGuard device up...
2024/12/14 16:59:38 Bringing router up...
2024/12/14 16:59:38 external route: up
2024/12/14 16:59:38 Clearing router settings...
2024/12/14 16:59:38 Starting network monitor...
2024/12/14 16:59:38 Engine created.
2024/12/14 16:59:38 monitor: [unexpected] network state changed, but stringification didn't: interfaces.State{defaultRoute=eth0 ifs={eth0:[172.20.0.204/16]} v4=true v6=false}
2024/12/14 16:59:38 pm: migrating "_daemon" profile to new format
2024/12/14 16:59:38 monitor: [unexpected] old: {"InterfaceIPs":{"eth0":["172.20.0.204/16"],"lo":["127.0.0.1/8","::1/128"]},"Interface":{"eth0":{"Index":219,"MTU":1500,"Name":"eth0","HardwareAddr":"AkKsFADM","Flags":51,"AltAddrs":null,"Desc":""},"lo":{"Index":1,"MTU":65536,"Name":"lo","HardwareAddr":null,"Flags":37,"AltAddrs":null,"Desc":""}},"HaveV6":false,"HaveV4":true,"IsExpensive":false,"DefaultRouteInterface":"eth0","HTTPProxy":"","PAC":""}
2024/12/14 16:59:38 monitor: [unexpected] new: {"InterfaceIPs":{"eth0":["172.20.0.204/16"],"lo":["127.0.0.1/8","::1/128"],"tailscale0":["fe80::ec04:e05d:5fef:71b8/64"]},"Interface":{"eth0":{"Index":219,"MTU":1500,"Name":"eth0","HardwareAddr":"AkKsFADM","Flags":51,"AltAddrs":null,"Desc":""},"lo":{"Index":1,"MTU":65536,"Name":"lo","HardwareAddr":null,"Flags":37,"AltAddrs":null,"Desc":""},"tailscale0":{"Index":2,"MTU":1280,"Name":"tailscale0","HardwareAddr":null,"Flags":57,"AltAddrs":null,"Desc":""}},"HaveV6":false,"HaveV4":true,"IsExpensive":false,"DefaultRouteInterface":"eth0","HTTPProxy":"","PAC":""}
2024/12/14 16:59:38 logpolicy: using system state directory "/var/lib/tailscale"
2024/12/14 16:59:38 LinkChange: major, rebinding. New state: interfaces.State{defaultRoute=eth0 ifs={eth0:[172.20.0.204/16]} v4=true v6=false}
2024/12/14 16:59:38 onPortUpdate(port=37033, network=udp6)
2024/12/14 16:59:38 onPortUpdate(port=60382, network=udp4)
2024/12/14 16:59:38 got LocalBackend in 36ms
2024/12/14 16:59:38 Start
2024/12/14 16:59:38 Rebind; defIf="eth0", ips=[172.20.0.204/16]
2024/12/14 16:59:38 magicsock: 0 active derp conns
2024/12/14 16:59:38 monitor: gateway and self IP changed: gw=172.20.0.1 self=172.20.0.204
2024/12/14 16:59:39 timeout waiting for initial portlist
2024/12/14 16:59:39 Backend: logs: be:7e20b8b6b181ac13d598f8efa6469a28b7b583adb428c3bc6b418c14782c3a1d fe:
2024/12/14 16:59:39 Switching ipn state NoState -> NeedsLogin (WantRunning=false, nm=false)
2024/12/14 16:59:39 blockEngineUpdates(true)
2024/12/14 16:59:39 health(warnable=wantrunning-false): error: Tailscale is stopped.
2024/12/14 16:59:39 wgengine: Reconfig: configuring userspace WireGuard config (with 0/0 peers)
2024/12/14 16:59:39 wgengine: Reconfig: configuring router
2024/12/14 16:59:39 wgengine: Reconfig: configuring DNS
2024/12/14 16:59:39 dns: Set: {DefaultResolvers:[] Routes:{} SearchDomains:[] Hosts:0}
2024/12/14 16:59:39 dns: Resolvercfg: {Routes:{} Hosts:0 LocalDomains:[]}
2024/12/14 16:59:39 dns: OScfg: {}
boot: 2024/12/14 16:59:39 Running 'tailscale up'
2024/12/14 16:59:39 Start
2024/12/14 16:59:39 generating new machine key
2024/12/14 16:59:39 machine key written to store
2024/12/14 16:59:39 Backend: logs: be:7e20b8b6b181ac13d598f8efa6469a28b7b583adb428c3bc6b418c14782c3a1d fe:
2024/12/14 16:59:39 Switching ipn state NoState -> NeedsLogin (WantRunning=true, nm=false)
2024/12/14 16:59:39 blockEngineUpdates(true)
2024/12/14 16:59:39 health(warnable=warming-up): error: Tailscale is starting. Please wait.
2024/12/14 16:59:39 health(warnable=wantrunning-false): ok
2024/12/14 16:59:39 control: client.Shutdown ...
2024/12/14 16:59:39 control: mapRoutine: exiting
2024/12/14 16:59:39 control: authRoutine: exiting
2024/12/14 16:59:39 control: updateRoutine: exiting
2024/12/14 16:59:39 control: Client.Shutdown done.
2024/12/14 16:59:39 StartLoginInteractive: url=false
2024/12/14 16:59:39 control: client.Login(2)
2024/12/14 16:59:39 control: LoginInteractive -> regen=true
2024/12/14 16:59:39 control: doLogin(regen=true, hasUrl=false)
2024/12/14 16:59:39 control: control server key from https://controlplane.tailscale.com: ts2021=[fSeS+], legacy=[nlFWp]
2024/12/14 16:59:39 control: Generating a new nodekey.
2024/12/14 16:59:39 control: RegisterReq: onode= node=[z/Agu] fup=false nks=false
2024/12/14 16:59:40 control: RegisterReq: got response; nodeKeyExpired=false, machineAuthorized=true; authURL=false
2024/12/14 16:59:40 blockEngineUpdates(false)
2024/12/14 16:59:40 health(warnable=not-in-map-poll): ok
2024/12/14 16:59:40 control: netmap: got new dial plan from control
2024/12/14 16:59:40 active login: mariosffx@gmail.com
2024/12/14 16:59:40 Switching ipn state NeedsLogin -> Starting (WantRunning=true, nm=true)
2024/12/14 16:59:40 magicsock: SetPrivateKey called (init)
2024/12/14 16:59:40 wgengine: Reconfig: configuring userspace WireGuard config (with 0/2 peers)
2024/12/14 16:59:40 wgengine: Reconfig: configuring router
2024/12/14 16:59:40 wgengine: Reconfig: configuring DNS
2024/12/14 16:59:40 dns: Set: {DefaultResolvers:[] Routes:{some-domain.ts.net.:[] ts.net.:[199.247.155.53 2620:111:8007::53]}+65arpa SearchDomains:[some-domain.ts.net.] Hosts:4}
2024/12/14 16:59:40 dns: Resolvercfg: {Routes:{.:[127.0.0.11] ts.net.:[199.247.155.53 2620:111:8007::53]} Hosts:4 LocalDomains:[some-domain.ts.net.]+65arpa}
2024/12/14 16:59:40 dns: OScfg: {Nameservers:[100.100.100.100] SearchDomains:[some-domain.ts.net. station. some-domain.ts.net.] }
2024/12/14 16:59:40 rename of "/etc/resolv.conf" to "/etc/resolv.pre-tailscale-backup.conf" failed (rename /etc/resolv.conf /etc/resolv.pre-tailscale-backup.conf: device or resource busy), falling back to copy+delete
2024/12/14 16:59:40 peerapi: serving on http://100.120.43.120:58525
2024/12/14 16:59:40 peerapi: serving on http://[fd7a:115c:a1e0::f401:2b78]:46914
2024/12/14 16:59:40 magicsock: home is now derp-4 (fra)
2024/12/14 16:59:40 magicsock: adding connection to derp-4 for home-keep-alive
2024/12/14 16:59:40 magicsock: 1 active derp conns: derp-4=cr0s,wr0s
2024/12/14 16:59:40 control: NetInfo: NetInfo{varies=false hairpin= ipv6=false ipv6os=true udp=true icmpv4=false derp=#4 portmap= link="" firewallmode="ipt-default"}
2024/12/14 16:59:40 derphttp.Client.Connect: connecting to derp-4 (fra)
2024/12/14 16:59:40 health(warnable=warming-up): ok
2024/12/14 16:59:40 Switching ipn state Starting -> Running (WantRunning=true, nm=true)
2024/12/14 16:59:40 magicsock: endpoints changed: 91.138.233.223:60382 (stun), 172.20.0.204:60382 (local)
Some peers are advertising routes but --accept-routes is false
boot: 2024/12/14 16:59:40 Startup complete, waiting for shutdown signal
2024/12/14 16:59:40 wgengine: Reconfig: configuring userspace WireGuard config (with 0/3 peers)
2024/12/14 16:59:40 wgengine: Reconfig: configuring router
2024/12/14 16:59:40 wgengine: Reconfig: configuring DNS
2024/12/14 16:59:40 dns: Set: {DefaultResolvers:[] Routes:{some-domain.ts.net.:[] ts.net.:[199.247.155.53 2620:111:8007::53]}+65arpa SearchDomains:[some-domain.ts.net.] Hosts:4}
2024/12/14 16:59:40 dns: Resolvercfg: {Routes:{.:[127.0.0.11] ts.net.:[199.247.155.53 2620:111:8007::53]} Hosts:4 LocalDomains:[some-domain.ts.net.]+65arpa}
2024/12/14 16:59:40 dns: OScfg: {Nameservers:[100.100.100.100] SearchDomains:[some-domain.ts.net. station. some-domain.ts.net.] }
2024/12/14 16:59:40 magicsock: derp-4 connected; connGen=1
2024/12/14 16:59:40 health(warnable=no-derp-home): ok
2024/12/14 16:59:40 [RATELIMIT] format("health(warnable=%s): ok")
2024/12/14 16:59:40 health(warnable=no-derp-connection): ok
2024/12/14 16:59:40 Received error: PollNetMap: unexpected EOF
2024/12/14 16:59:41 control: netmap: got new dial plan from control
2024/12/14 16:59:41 wgengine: Reconfig: configuring router
2024/12/14 16:59:41 wgengine: Reconfig: configuring DNS
2024/12/14 16:59:41 dns: Set: {DefaultResolvers:[] Routes:{some-domain.ts.net.:[] ts.net.:[199.247.155.53 2620:111:8007::53]}+65arpa SearchDomains:[some-domain.ts.net.] Hosts:5}
2024/12/14 16:59:41 dns: Resolvercfg: {Routes:{.:[127.0.0.11] ts.net.:[199.247.155.53 2620:111:8007::53]} Hosts:5 LocalDomains:[some-domain.ts.net.]+65arpa}
2024/12/14 16:59:41 dns: OScfg: {Nameservers:[100.100.100.100] SearchDomains:[some-domain.ts.net. station. some-domain.ts.net.] }
2024/12/14 16:59:41 wgengine: Reconfig: configuring userspace WireGuard config (with 0/4 peers)
2024/12/14 16:59:41 wgengine: Reconfig: configuring router
2024/12/14 16:59:41 [RATELIMIT] format("wgengine: Reconfig: configuring router")
2024/12/14 16:59:41 wgengine: Reconfig: configuring DNS
2024/12/14 16:59:41 [RATELIMIT] format("wgengine: Reconfig: configuring DNS")
2024/12/14 16:59:41 dns: Set: {DefaultResolvers:[] Routes:{some-domain.ts.net.:[] ts.net.:[199.247.155.53 2620:111:8007::53]}+65arpa SearchDomains:[some-domain.ts.net.] Hosts:5}
2024/12/14 16:59:41 [RATELIMIT] format("dns: Set: %v")
2024/12/14 16:59:41 dns: Resolvercfg: {Routes:{.:[127.0.0.11] ts.net.:[199.247.155.53 2620:111:8007::53]} Hosts:5 LocalDomains:[some-domain.ts.net.]+65arpa}
2024/12/14 16:59:41 [RATELIMIT] format("dns: Resolvercfg: %v")
2024/12/14 16:59:41 dns: OScfg: {Nameservers:[100.100.100.100] SearchDomains:[some-domain.ts.net. station. some-domain.ts.net.] }
2024/12/14 16:59:41 [RATELIMIT] format("dns: OScfg: %v")
2024/12/14 16:59:46 wgengine: Reconfig: configuring userspace WireGuard config (with 0/5 peers)
3. Caddy version:
v2.8.4 h1:q3pe0wpBj1OcHFZ3n/1nl4V4bxBrYoSoab7rL9BMYNk=
4. How I installed and ran Caddy:
I am using Docker Compose on Fedora; here are my configuration files:
docker-compose.yaml
name: download_box

networks:
  db_net:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: ${SUBNET}
          gateway: ${GATEWAY}

services:
  torrents:
    image: lscr.io/linuxserver/qbittorrent:latest
    hostname: torrents
    container_name: torrents
    restart: always
    volumes:
      - ./services/torrents/config:/config
    networks:
      db_net:
        ipv4_address: ${TORRENTS_IPV4_ADDRESS}
    expose:
      - ${TORRENT_HTTP_PORT}
      - ${TORRENTING_PORT}
    ports:
      - ${TORRENT_HTTP_PORT}:${TORRENT_HTTP_PORT}
      - ${TORRENTING_PORT}:${TORRENTING_PORT}
    environment:
      - PUID=${WWW_ID}
      - PGID=${ROOT_ID}
      - TZ=${TZ}
      - WEBUI_PORT=${TORRENT_HTTP_PORT}
      - TORRENTING_PORT=${TORRENTING_PORT}

  sonarr:
    container_name: sonarr
    image: lscr.io/linuxserver/sonarr:latest
    restart: always
    volumes:
      - ./services/sonarr/config:/config
    networks:
      db_net:
        ipv4_address: ${SONARR_IPV4_ADDRESS}
    expose:
      - ${SONARR_HTTP_PORT}
    ports:
      - ${SONARR_HTTP_PORT}:${SONARR_HTTP_PORT}
    environment:
      - PUID=${WWW_ID}
      - PGID=${ROOT_ID}
      - TZ=${TZ}

  caddy:
    container_name: caddy
    build: .
    cap_add:
      - NET_ADMIN
    restart: always
    depends_on:
      tailscale:
        condition: service_started
        restart: true
    volumes:
      - ./static/caddy/etc:/etc/caddy/
      - ./services/tailscale/tmp:/var/run/tailscale
    networks:
      db_net:
        ipv4_address: ${CADDY_IPV4_ADDRESS}
    ports:
      - ${CADDY_HTTP_API_PORT}:${CADDY_HTTP_API_PORT}
      - ${CADDY_HTTP_PORT}:${CADDY_HTTP_PORT}
      - ${CADDY_HTTPS_PORT}:${CADDY_HTTPS_PORT}
    environment:
      - PUID=${ROOT_ID}
      - PGID=${ROOT_ID}
      - TZ=${TZ}
      - EMAIL_CONTACT=${EMAIL_CONTACT}
      - TAILSCALE_DOMAIN=${TAILSCALE_DOMAIN}
      - TAILSCALE_AUTHKEY=${TAILSCALE_AUTHKEY}
    env_file:
      - ./.env

  tailscale:
    container_name: tailscale
    # image: tailscale/tailscale:v1.72.1
    build:
      dockerfile: ./TailscaleDockerfile
    restart: always
    devices:
      - /dev/net/tun
    volumes:
      - /lib/modules:/lib/modules
      - ./services/tailscale/tmp:/tmp
      - ./services/tailscale/state:/var/lib/tailscale
    cap_add:
      - net_admin
      - sys_module
    networks:
      db_net:
        ipv4_address: ${TAILSCALE_IPV4_ADDRESS}
    environment:
      - TS_AUTHKEY=${TAILSCALE_AUTHKEY}
      - TS_AUTH_ONCE=false
      - TS_HOSTNAME=download-box
      - TS_ACCEPT_DNS=true
      - TS_USERSPACE=false
      - TS_STATE_DIR=/var/lib/tailscale
      - TSNET_FORCE_LOGIN=1
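Given the socket-timeout errors above, one thing worth double-checking is that the two bind mounts that are supposed to share the tailscaled socket (./services/tailscale/tmp mounted as /tmp in the tailscale container and as /var/run/tailscale in the caddy container) really resolve to the same host directory at runtime:

docker inspect -f '{{json .Mounts}}' tailscale
docker inspect -f '{{json .Mounts}}' caddy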
.env
EMAIL_CONTACT=johndoe@email.com
TAILSCALE_DOMAIN=some-domain.ts.net
TAILSCALE_AUTHKEY=tskey-auth-key
WWW_ID=33
ROOT_ID=0
PGID=0
TZ=Europe/Athens
ENV=development
SUBNET=172.20.0.0/16
GATEWAY=172.20.0.1
CADDY_IPV4_ADDRESS=172.20.0.200
TAILSCALE_IPV4_ADDRESS=172.20.0.204
SONARR_IPV4_ADDRESS=172.20.0.205
TORRENTS_IPV4_ADDRESS=172.20.0.207
CADDY_HTTP_API_PORT=2199
CADDY_HTTP_PORT=80
CADDY_HTTPS_PORT=443
SONARR_HTTP_PORT=8989
TORRENT_HTTP_PORT=9191
TORRENTING_PORT=50126
Caddy Dockerfile
FROM caddy:2.8.4-builder AS builder

RUN xcaddy build \
    --with github.com/caddy-dns/cloudflare \
    --with github.com/tailscale/caddy-tailscale

FROM caddy:2.8.4

COPY --from=builder /usr/bin/caddy /usr/bin/caddy
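To verify that both plugins actually end up in the custom binary (the image tag here is arbitrary):

docker build -t caddy-custom .
docker run --rm caddy-custom caddy list-modules | grep -iE 'tailscale|cloudflare'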
Tailscale Dockerfile
FROM tailscale/tailscale:v1.72.1
RUN apk update
RUN apk add nftables
RUN rm -f /sbin/iptables
RUN ln -s /sbin/xtables-nft-multi /sbin/iptables
RUN rm -f /sbin/ip6tables
RUN ln -s /sbin/xtables-nft-multi /sbin/ip6tables
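The symlinks above force the nft-backed iptables shim; a quick sanity check on the running container (output wording varies by iptables version):

docker exec tailscale iptables -V   # should mention (nf_tables)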
a. System environment:
Operating System: Fedora Linux 41 (KDE Plasma)
Kernel: Linux 6.11.11-300.fc41.x86_64
Architecture: x86-64
b. Command:
docker compose up --build
d. My complete Caddy config:
Caddyfile
{
	email {env.EMAIL_CONTACT}

	tailscale {
		auth_key {env.TAILSCALE_AUTHKEY}
		ephemeral
	}
}

(service) {
	bind tailscale/{args[0]}

	tls {
		get_certificate tailscale
	}

	tailscale_auth

	reverse_proxy {args[0]}:{args[1]} {
		header_up X-Webauth-User {http.auth.user.tailscale_login}
		header_up X-Tailscale-Tailnet {http.auth.user.tailscale_tailnet}
	}
}

(local_service) {
	bind tailscale/{args[0]}

	tls {
		get_certificate tailscale
	}

	tailscale_auth

	reverse_proxy {args[1]}:{args[2]} {
		header_up X-Webauth-User {http.auth.user.tailscale_login}
		header_up X-Tailscale-Tailnet {http.auth.user.tailscale_tailnet}
	}
}

https://caddy.{$TAILSCALE_DOMAIN} {
	import service caddy {$CADDY_HTTP_API_PORT}
}

https://sonarr.{$TAILSCALE_DOMAIN} {
	import service sonarr {$SONARR_HTTP_PORT}
}

https://torrents.{$TAILSCALE_DOMAIN} {
	import service torrents {$TORRENT_HTTP_PORT}
}
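For reference, once the snippet arguments and the .env values above are substituted, each site block effectively expands to something like this (showing sonarr; caddy and torrents differ only in hostname and upstream port):

https://sonarr.some-domain.ts.net {
	bind tailscale/sonarr
	tls {
		get_certificate tailscale
	}
	tailscale_auth
	reverse_proxy sonarr:8989 {
		header_up X-Webauth-User {http.auth.user.tailscale_login}
		header_up X-Tailscale-Tailnet {http.auth.user.tailscale_tailnet}
	}
}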