1. The problem I’m having:
I have two issues: one is about read/write buffer settings tailored to Next.js, and the other is about rate limiting. We are running Caddy behind another load balancer (a managed load balancer from a cloud provider) and routing traffic to 4 containers running Next.js.
Rate limiting:
I have been tinkering with rate limiting because we sometimes get hammered by bots. The Googlebot part seems to work fine, but the "normal users" part is currently not working (that's why it's commented out): we get rate limited too fast, and I think this is because the initial request triggers multiple other requests, so the limit is hit too soon. Is it possible to exclude specific paths?
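What I have in mind is something along these lines (an untested sketch; the /_next/static/* path is only my guess at which asset requests a single page view fans out into, and the events/window values are placeholders):

handle /_next/static/* {
	# Static assets: proxied without counting against the limit
	reverse_proxy app-1:3000 app-2:3000 app-3:3000 app-4:3000
}

handle {
	# Everything else counts against the normal-user limit
	rate_limit {
		zone normal_users_zone {
			key {http.request.client_ip}
			events 30
			window 1m
			status_code 429
		}
	}
	reverse_proxy app-1:3000 app-2:3000 app-3:3000 app-4:3000
}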
Read / write buffers / settings for Next.js:
I have been trying to figure out what the "optimal" settings are for a reverse proxy in front of Next.js, but have come up a bit short. Are the values we are using fine? Could they be tweaked?
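For concreteness, these are the transport settings I'm mainly unsure about (excerpted from the catch-all handle in the full Caddyfile below):

transport http {
	dial_timeout 30s
	response_header_timeout 30s
	keepalive 30s
	keepalive_idle_conns 10
	max_response_header 10MB
	read_buffer 2MB
	write_buffer 128KB
}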
2. Error messages and/or full log output:
No errors.
3. Caddy version:
v2.9.1 h1:OEYiZ7DbCzAWVb6TNEkjRcSCRGHVoZsJinoDR/n9oaY=
4. How I installed and ran Caddy:
We installed Caddy using Docker, with an image we built ourselves that includes mholt/caddy-ratelimit (HTTP rate limiting module for Caddy 2).
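The image follows roughly the standard builder pattern from the official Docker image docs (paraphrased; the exact tags in our actual Dockerfile may differ):

FROM caddy:2.9.1-builder AS builder
RUN xcaddy build --with github.com/mholt/caddy-ratelimit

FROM caddy:2.9.1
COPY --from=builder /usr/bin/caddy /usr/bin/caddy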
a. System environment:
Ubuntu 24.04, Docker version 28.0.4, build b8034c0
16 cores, 24 GB memory
c. Service/unit/compose file:
Our Docker Compose file:
services:
  caddy:
    container_name: caddy
    image: ghcr.io/<redacted>/caddy:latest
    restart: always
    ports:
      - "80:80"
    volumes:
      - "./Caddyfile:/etc/caddy/Caddyfile:ro"
      - "www:/var/www/goaccess/:ro"
      - "log:/var/log/caddy/"
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 3G
        reservations:
          cpus: '2'
          memory: 3G
    depends_on:
      - app-1
      - app-2
      - app-3
      - app-4
    networks:
      - app_network

  app-1:
    container_name: <redacted>-app-1
    image: ${image}
    restart: always
    env_file: /root/.env
    environment:
      - INSTANCE_ID=1
    expose:
      - "3000"
    networks:
      - app_network
    volumes: []
    deploy:
      resources:
        limits:
          cpus: '3'
          memory: 5G
        reservations:
          cpus: '3'
          memory: 5G
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:3000/api/health"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 45s

  app-2:
    container_name: <redacted>-app-2
    image: ${image}
    restart: always
    env_file: /root/.env
    environment:
      - INSTANCE_ID=2
    expose:
      - "3000"
    networks:
      - app_network
    volumes: []
    deploy:
      resources:
        limits:
          cpus: '3'
          memory: 5G
        reservations:
          cpus: '3'
          memory: 5G
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:3000/api/health"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 30s

  app-3:
    container_name: <redacted>-app-3
    image: ${image}
    restart: always
    env_file: /root/.env
    environment:
      - INSTANCE_ID=3
    expose:
      - "3000"
    networks:
      - app_network
    volumes: []
    deploy:
      resources:
        limits:
          cpus: '3'
          memory: 5G
        reservations:
          cpus: '3'
          memory: 5G
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:3000/api/health"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 30s

  app-4:
    container_name: <redacted>-app-4
    image: ${image}
    restart: always
    env_file: /root/.env
    environment:
      - INSTANCE_ID=4
    expose:
      - "3000"
    networks:
      - app_network
    volumes: []
    deploy:
      resources:
        limits:
          cpus: '3'
          memory: 5G
        reservations:
          cpus: '3'
          memory: 5G
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:3000/api/health"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 30s

networks:
  app_network:
    name: app_network
    driver: bridge

volumes:
  www:
  log:
d. My complete Caddy config:
{
	admin off
	auto_https off

	servers {
		listener_wrappers {
			proxy_protocol {
				timeout 10s
				allow 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 127.0.0.0/8
			}
		}
		timeouts {
			read_body 30s
			read_header 30s
			write 30s
			idle 120s
		}
		trusted_proxies static private_ranges
	}
}

:80 {
	@googlebot {
		header User-Agent *Googlebot*
	}

	handle /api/health {
		reverse_proxy app-1:3000 app-2:3000 app-3:3000 app-4:3000 {
			health_uri /api/health
			health_interval 15s
			health_timeout 5s
			health_status 200
			transport http {
				read_buffer 128KB
				write_buffer 128KB
				max_response_header 128KB
			}
		}
	}

	handle @googlebot {
		rate_limit {
			zone googlebot_zone {
				key {http.request.remote_ip}
				events 15
				window 1m
				status_code 429
			}
		}
		reverse_proxy app-1:3000 app-2:3000 app-3:3000 app-4:3000 {
			lb_policy round_robin
			health_uri /api/health
			health_interval 15s
			health_timeout 5s
			health_status 200
			fail_duration 5s
			max_fails 3
			unhealthy_status 503
			transport http {
				dial_timeout 30s
				response_header_timeout 30s
				keepalive 30s
				keepalive_idle_conns 10
				max_response_header 2MB
				read_buffer 512KB
				write_buffer 512KB
			}
		}
	}

	handle {
		# Higher limit for normal users
		# rate_limit {
		# 	zone normal_users_zone {
		# 		key {http.request.client_ip}
		# 		events 30 # Total should be 120 since we have 4 servers running caddy.
		# 		window 1m
		# 		status_code 429
		# 	}
		# }
		reverse_proxy app-1:3000 app-2:3000 app-3:3000 app-4:3000 {
			lb_policy round_robin
			health_uri /api/health
			health_interval 15s
			health_timeout 5s
			health_status 200
			fail_duration 5s
			max_fails 3
			unhealthy_status 503
			transport http {
				dial_timeout 30s
				response_header_timeout 30s
				keepalive 30s
				keepalive_idle_conns 10
				max_response_header 10MB
				read_buffer 2MB
				write_buffer 128KB
			}
		}
	}

	log {
		format json
		output file /var/log/caddy/access.log {
			roll_size 10gb
			roll_keep 10
			roll_keep_for 2160h
		}
	}

	handle_errors {
		@is_rate_limited expression {http.error.status_code} == 429
		handle @is_rate_limited {
			respond "Rate limit exceeded. Please try again later." 429 {
				close
			}
		}
		handle {
			respond "Service temporarily unavailable. Please try again." 503
		}
	}
}