Hi,
I am using a Docker Compose YAML file, and I have 3 services:
- Postgres DB
- Python Django application
- Nginx as reverse proxy
When I rebuild using `docker compose build`, I see that Docker removes the old containers before the new containers' status changes to healthy, because of which I face downtime.
Here is my docker-compose.yml file:
services:
  db:
    image: postgres:latest
    # NOTE(review): a fixed container_name means Compose can never start a
    # replacement container alongside the old one — it must stop/remove the
    # old container first. This is one of the causes of the downtime you see;
    # consider dropping container_name (and the one on backend/nginx) if you
    # want rolling replacement.
    container_name: postgresserver
    restart: always
    env_file:
      - .env
    volumes:
      - database-data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: ${PROJECT_DB_USER}          # Username
      POSTGRES_PASSWORD: ${PROJECT_DB_PASSWORD}  # Replace with a strong password
      POSTGRES_DB: ${PROJECT_DB_DB}              # Database name
      POSTGRES_PORT: ${PROJECT_DB_PORT}          # Port
    expose:
      - "${PROJECT_DB_PORT}"
    healthcheck:
      # Fix: pass -U explicitly — without it pg_isready connects as the
      # current OS user, which may not exist as a DB role and can make the
      # healthcheck flap even though Postgres is up.
      test: ["CMD", "pg_isready", "-U", "${PROJECT_DB_USER}", "-d", "${PROJECT_DB_DB}"]
      interval: 10s      # Check every 10 seconds
      timeout: 5s        # Maximum time one probe may take
      retries: 5         # Retry up to 5 times before marking unhealthy
      # Grace period for initial startup: failures during this window do not
      # count toward retries, so a slow first boot is not marked unhealthy.
      start_period: 30s

  backend:
    image: infinitymailer_backend:latest
    build:
      context: .
      dockerfile: Dockerfile
    container_name: InfinityMailerBackendContainer
    env_file:
      - .env
    environment:
      PROJECT_DB_HOST: db
    command: ["sh", "-c", "python3 manage.py makemigrations GAuthApp API MassSender && python3 manage.py migrate --noinput && python3 manage.py collectstatic --noinput && gunicorn --bind :8000 --workers 3 InfinityMailer.wsgi:application"]
    restart: always
    volumes:
      - ./media:/InfinityMailer/media
      - ./templates:/InfinityMailer/templates
    healthcheck:
      test: ["CMD", "python3", "manage.py", "check", "--database", "default"]
      # Fix: without an explicit interval the default (30s) applies, so the
      # container can sit in "starting" for a long time after migrations
      # finish — which directly lengthens your downtime window. Probe more
      # often and give migrations/collectstatic a grace period instead.
      interval: 10s
      timeout: 8s
      retries: 5
      start_period: 60s
      # If you run Docker Engine 25+ / Compose 2.20+, also consider
      # start_interval: 2s to probe rapidly during start_period, so the
      # container is marked healthy as soon as possible.
    expose:
      - "8000"
    depends_on:
      db:
        condition: service_healthy
    develop:
      watch:
        - action: rebuild
          path: ./Dockerfile
        # Fix: watch sync targets are paths *inside the container* and must
        # be absolute. Your volume mounts show the app root is /InfinityMailer,
        # so the relative targets below would not land where gunicorn reads
        # the code. (TODO confirm against the Dockerfile's WORKDIR.)
        - action: sync+restart
          path: ./static/
          target: /InfinityMailer/static
        - action: rebuild
          path: .env
        - action: sync+restart
          path: ./InfinityMailer/
          target: /InfinityMailer/InfinityMailer/
          ignore:
            - InfinityMailer/__pycache__/
            - InfinityMailer/migrations/
        - action: sync+restart
          path: ./GAuthApp/
          target: /InfinityMailer/GAuthApp/
          ignore:
            - GAuthApp/__pycache__/
            - GAuthApp/migrations/
        - action: sync+restart
          path: ./API/
          target: /InfinityMailer/API/
          ignore:
            - API/__pycache__/
            - API/migrations/
        - action: sync+restart
          path: ./MassSender/
          target: /InfinityMailer/MassSender/
          ignore:
            - MassSender/__pycache__/
            - MassSender/migrations/
        - action: rebuild
          path: ./requirements.txt

  nginx:
    image: infinitymailer_nginx:stable
    build:
      context: ./nginx/
      dockerfile: Dockerfile
    container_name: nginxserver
    ports:
      # Quoted to avoid YAML's HH:MM / sexagesimal parsing surprises on
      # port mappings (Compose best practice).
      - "80:80"
      - "443:443"
    env_file:
      - .env-nginx
    environment:
      NGINX_ENVSUBST_TEMPLATE_SUFFIX: ".conf"
    volumes:
      - ./nginx/ssl_certificate:/etc/letsencrypt/live/
      - ./staticfiles:/opt/djangofiles/staticfiles
      - ./media:/opt/djangofiles/media
    depends_on:
      backend:
        condition: service_healthy
    develop:
      watch:
        - action: rebuild
          path: nginx/Dockerfile
        - action: rebuild
          path: .env-nginx
        - action: sync+restart
          path: nginx/conf.d
          target: /etc/nginx/conf.d/

volumes:
  database-data:
I am also using Compose's watch mode (`docker compose watch`).
When watch triggers a rebuild, the containers are rebuilt and my server goes down until the new containers' status changes to healthy.
Is there any way to avoid the downtime?