Caddy display only congratulation page with django

Hello,

1. The problem I’m having:

i just deployed django with caddy via django-compose using template with link below and caddy only displaying congratulation page, i dont know why but dont want reverse to django server

2. Error messages and/or full log output:

-caddy-1  | {"level":"info","ts":1679145922.4561706,"msg":"using provided configuration","config_file":"/etc/caddy/Caddyfile","config_adapter":"caddyfile"}
-caddy-1  | {"level":"info","ts":1679145922.4737067,"logger":"admin","msg":"admin endpoint started","address":"localhost:2019","enforce_origin":false,"origins":["//[::1]:201                                                 
9","//127.0.0.1:2019","//localhost:2019"]}
-caddy-1  | {"level":"warn","ts":1679145922.4744105,"logger":"http","msg":"server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server","                                                   server_name":"srv0","http_port":80}
-caddy-1  | {"level":"info","ts":1679145922.475156,"logger":"http.log","msg":"server running","name":"srv0","protocols":["h1","h2","h3"]}
-caddy-1  | {"level":"info","ts":1679145922.4756062,"msg":"autosaved config (load with --resume flag)","file":"/config/caddy/autosave.json"}
-caddy-1  | {"level":"info","ts":1679145922.4758577,"msg":"serving initial configuration"}
-caddy-1  | {"level":"info","ts":1679145922.4764495,"logger":"tls.cache.maintenance","msg":"started background certificate maintenance","cache":"0xc0003e82a0"}
-caddy-1  | {"level":"info","ts":1679145922.4767337,"logger":"tls","msg":"cleaning storage unit","description":"FileSt

3. Caddy version:

Caddy version caddy:2.6.2

4. How I installed and ran Caddy:

Installed using docker compose on debian 10

c. Service/unit/compose file:

docker-compose

---

# Default compose file for development and production.
# Should be used directly in development.
# Automatically loads `docker-compose.override.yml` if it exists.
# No extra steps required.
# Should be used together with `docker/docker-compose.prod.yml`
# in production.

version: "3.8"
services:
  db:
    image: "postgres:15-alpine"
    restart: unless-stopped
    volumes:
      - pgdata:/var/lib/postgresql/data
    networks:
      - webnet
    env_file: ./config/.env

#  caches:
#    image: "redis:latest"
#    restart: unless-stopped
#    command: /bin/sh -c "redis-server --save 20 1 --loglevel warning --requirepass $$REDIS_HOST_PASSWORD"
#    volumes:
#      - caches:/var/lib/redis/data
#    env_file: ./config/.env
#    ports:
#      - '6379:6379'
#    environment:
#      - REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL,CONFIG

  web:
    <<: &web
      # Image name is changed in production:
      image: "dapiombackend:dev"
      build:
        target: development_build
        context: .
        dockerfile: ./docker/django/Dockerfile
        args:
          DJANGO_ENV: development
        cache_from:
          - "dapiombackend:dev"
          - "dapiombackend:latest"
          - "*"

      volumes:
        - django-static:/var/www/django/static
      depends_on:
        - db
      networks:
        - webnet
      env_file: ./config/.env
      environment:
        DJANGO_DATABASE_HOST: db

    command: python -Wd manage.py runserver 0.0.0.0:8000
    healthcheck:
      # We use `$$` here because:
      # one `$` goes to shell,
      # one `$` goes to `docker-compose.yml` escaping
      test: |
        /usr/bin/test $$(
          /usr/bin/curl --fail http://localhost:8000/health/?format=json
          --write-out "%{http_code}" --silent --output /dev/null
        ) -eq 200
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

# This task is an example of how to extend existing ones:
#   some_worker:
#     <<: *web
#     command: python manage.py worker_process

networks:
  # Network for your internals, use it by default:
  webnet:

volumes:
  pgdata:
  django-static:

docker-compose.prod

---

# This compose-file is production only. So, it should not be called directly.
#
# Instead, it should be a part of your deployment strategy.
# This setup is supposed to be used with `docker-swarm`.
# See `./docs/pages/template/production.rst` docs.

version: "3.8"
services:
  caddy:
    image: "caddy:2.6.2"
    restart: unless-stopped
    env_file: ./config/.env
    volumes:
      - ./docker/caddy/Caddyfile:/srv/Caddyfile  # configuration
      - ./docker/caddy/ci.sh:/etc/ci.sh  # test script
      - caddy-config:/config  # configuration autosaves
      - caddy-data:/data  # saving certificates
      - django-static:/var/www/django/static  # serving django's statics
      - django-media:/var/www/django/media  # serving django's media
    ports:
      - "80:80"
      - "443:443"
    depends_on:
      - web
    networks:
      - proxynet

#  caches:
#    image: "redis:latest"
#    restart: unless-stopped
#    command: /bin/sh -c "redis-server --save 20 1 --loglevel warning --requirepass $$REDIS_HOST_PASSWORD"
#    volumes:
#      - caches:/var/lib/redis/data
#    env_file: ./config/.env
#    ports:
#      - '6379:6379'
#    environment:
#      - REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL,CONFIG

  web:

    # Image for production:
    image: "dapiombackend:latest"
    build:
      target: production_build
      args:
        DJANGO_ENV: production

    restart: unless-stopped
    volumes:
      - django-media:/var/www/django/media  # since in dev it is app's folder
      - django-locale:/code/locale  # since in dev it is app's folder

    command: bash ./docker/django/gunicorn.sh
    networks:
      - proxynet
    ports:
      - "8000:80"
    expose:
      - 8000

# This task is an example of how to extend existing ones:
#   some_worker:
#     <<: *web
#     command: python manage.py worker_process
#     deploy:
#       replicas: 2

networks:
  # Network for your proxy server and application to connect them,
  # do not use it for anything else!
  proxynet:

volumes:
  django-media:
  django-locale:
  caddy-config:
  caddy-data:

d. My complete Caddy config:

# See https://caddyserver.com/docs

# Email for Let's Encrypt expiration notices
{
	email {$TLS_EMAIL}
}

# "www" redirect to "non-www" version
www.{$DOMAIN_NAME} {
	redir https://{$DOMAIN_NAME}{uri}
}

{$DOMAIN_NAME} {
	# HTTPS options:
	header Strict-Transport-Security max-age=31536000;

	# Removing some headers for improved security:
	header -Server

	# Serve static files
	handle_path /static/* {
		# STATIC_ROOT
		root * /var/www/django/static

		file_server {
			# Staticfiles are pre-compressed in `gunicorn.sh`
			precompressed br gzip
		}
	}

	# Serve media files
	handle_path /media/* {
		# MEDIA_ROOT
		root * /var/www/django/media

		file_server
	}

	# Serve Django app
	handle {
		reverse_proxy web:8000
	}

	# Dynamically compress response with gzip when it makes sense.
	# This setting is ignored for precompressed files.
	encode gzip

	# Logs:
	log {
		output stdout
	}
}

5. Links to relevant resources:

wemake-django-template/{{cookiecutter.project_name}} at master · wemake-services/wemake-django-template · GitHub this one template and configuration for docker and caddy.

edited

Please fill out the help template so we can help you

2 Likes

There, the Caddyfile used in the container is actually in /etc/caddy/Caddyfile not where you specified. Fix the mount path and it should work.

1 Like

This doesn’t improve security at all, btw.

You need to use the internal port in Caddy, i.e. port 80, not port 8000. You can also remove that port mapping in your docker-compose.yml, it’s not needed.

1 Like

This topic was automatically closed 30 days after the last reply. New replies are no longer allowed.