settings for deploy

feature/docker
Dmitriy Shesterkin 9 years ago
parent 4d1bd790a6
commit 26a1be29a2
  1. 51
      Dockerfile
  2. 7
      conf/docker/entrypoint_production.sh
  3. 44
      conf/gunicorn_logging.ini
  4. 67
      conf/gunicorn_prod.py
  5. 60
      conf/nginx.conf
  6. 70
      conf/nginx.stage.conf.template
  7. 44
      conf/supervisor.conf
  8. 69
      docker-compose.local.yml
  9. 87
      docker-compose.yml

@ -1,8 +1,45 @@
# NOTE(review): this span is a raw diff capture (hunk "@ -1,8 +1,45"): lines
# from the OLD Dockerfile (python:3.6, apt-get, /code layout) are interleaved
# with the NEW one (alpine:3.6, apk, /opt/app layout) without +/- markers.
# Only one FROM and one layout belong in the real file.
# --- old base image (replaced by alpine:3.6 below) ---
FROM python:3.6
# --- new base image ---
FROM alpine:3.6
ENV PYTHONUNBUFFERED 1
# --- old /code-based layout (apt + requirements/local.txt) ---
RUN mkdir -p /code/public/
WORKDIR /code
COPY requirements /code/requirements
RUN apt-get update && apt-get -y install ghostscript && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN pip install --upgrade pip && pip install -r requirements/local.txt
ADD . /code/
# --- new /opt/app-based layout ---
# Requirements copied before the app source so the pip layer is cache-friendly
COPY requirements /opt/app/requirements
# Runtime packages first, then build deps as a removable ".build-dependencies"
# virtual group deleted at the end of the same layer.
# NOTE(review): "apk update" is redundant when --no-cache is used
RUN apk update && apk add --update --no-cache --progress \
make \
libpq \
nginx \
libmagic \
supervisor \
ghostscript \
zlib jpeg libpng freetype \
bash bash-completion && \
apk add --no-cache --virtual=.build-dependencies \
musl-dev \
build-base \
postgresql-dev \
ca-certificates \
openssl \
zlib-dev jpeg-dev libpng-dev freetype-dev \
python-dev && \
python -m ensurepip && \
rm -r /usr/lib/python*/ensurepip && \
pip install --upgrade pip setuptools && \
pip install --no-cache-dir -r /opt/app/requirements/production.txt && \
# Nginx
chown -R nginx:www-data /var/lib/nginx && \
# Delete build dependencies after use
apk del .build-dependencies && \
rm -r /root/.cache
COPY . /opt/app
# Media uploads live on a volume; declared after the directory is populated
VOLUME ["/opt/app/public/media"]
WORKDIR /opt/app
RUN rm -v /etc/nginx/nginx.conf
# NOTE(review): COPY is preferred over ADD for plain local files
ADD /conf/nginx.conf /etc/nginx/
EXPOSE 80
# Entrypoint script applies Django migrations, then starts supervisord
CMD ["conf/docker/entrypoint_production.sh"]

@ -10,7 +10,8 @@ python src/manage.py migrate makemigrations
# NOTE(review): diff capture (hunk "@ -10,7 +10,8"): the plain "migrate" lines
# are the OLD version of this script; the "--noinput" ones are the NEW version.
# Apply database migrations
echo "Apply database migrations"
python src/manage.py migrate myauth
python src/manage.py migrate
# --noinput: never prompt interactively inside a container
python src/manage.py migrate myauth --noinput
python src/manage.py migrate --noinput
# supervisord runs with nodaemon=true (see conf/supervisor.conf), so this call
# blocks for the container's lifetime.
# NOTE(review): exec "$@" below is only reached after supervisord exits —
# confirm that is intended.
supervisord -c /opt/app/conf/supervisor.conf
exec "$@"

@ -0,0 +1,44 @@
# gunicorn_logging.ini — Python logging.config.fileConfig format, loaded by
# gunicorn via the "logconfig" setting in conf/gunicorn_prod.py.
# All handlers write to stdout so `docker logs` captures everything.

[loggers]
keys=root,gunicorn.access,gunicorn.error

[logger_root]
level=INFO
handlers=root

[logger_gunicorn.access]
level=INFO
handlers=gunicorn.access
# qualname is the dotted logger name gunicorn emits records under
qualname=gunicorn.access
# propagate=0: do not double-log through the root logger
propagate=0

[logger_gunicorn.error]
level=INFO
handlers=gunicorn.error
qualname=gunicorn.error
propagate=0

[handlers]
keys=root,gunicorn.access,gunicorn.error

[handler_root]
class=logging.StreamHandler
formatter=default
args=(sys.stdout,)

[handler_gunicorn.access]
class=logging.StreamHandler
formatter=default
args=(sys.stdout,)

[handler_gunicorn.error]
class=logging.StreamHandler
formatter=default
args=(sys.stdout,)

[formatters]
keys=default

[formatter_default]
class=logging.Formatter
format=* %(asctime)s [%(levelname)s] {%(filename)s} - %(message)s
# %x %X = locale-dependent date / time
datefmt=%x %X

@ -0,0 +1,67 @@
# Settings http://docs.gunicorn.org/en/stable/settings.html
# Production gunicorn configuration, loaded with `gunicorn -c` (see the
# [program:gunicorn] command in conf/supervisor.conf).
import os  # NOTE(review): appears unused in this span -- confirm before removing
bind = 'unix:/tmp/gunicorn.sock'  # nginx's "upstream django" points at this same socket
backlog = 2048 # The number of pending connections
preload = True # Load application code before the worker processes are forked
workers = 2
worker_class = 'sync'  # sync workers; worker_connections below applies to async classes only
worker_connections = 1000
threads = 1
timeout = 120  # seconds before an unresponsive worker is killed and restarted
keepalive = 2
reload = False  # no code auto-reload in production
spew = False  # if True, gunicorn traces every executed line (extremely verbose)
check_config = False
daemon = False  # stay in the foreground; supervisord manages the process
pidfile = None
umask = 0
user = None  # inherit user/group from the launching process
group = None
tmp_upload_dir = None
proc_name = None
# Logging
# -------
logconfig = '/opt/app/conf/gunicorn_logging.ini'
def post_fork(server, worker):
    """Server hook: invoked in the master process right after a worker is forked."""
    spawned_pid = worker.pid
    server.log.info("Worker spawned (pid: %s)", spawned_pid)
def pre_fork(server, worker):
    """Server hook: invoked just before a worker is forked. Intentionally a no-op."""
    pass
def pre_exec(server):
    """Server hook: invoked just before a new master process is exec'd."""
    server.log.info("Forked child, re-executing.")
def when_ready(server):
    """Server hook: invoked once the master has started and is about to spawn workers."""
    server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
    """Worker hook for INT/QUIT: log the signal, then dump a stack trace of
    every live thread at debug level to help diagnose why the worker stopped."""
    worker.log.info("Worker received INT or QUIT signal")
    # Collect per-thread tracebacks (imports kept local, as in a config module).
    import threading, sys, traceback
    names = {t.ident: t.name for t in threading.enumerate()}
    lines = []
    for tid, frame in sys._current_frames().items():
        lines.append("\n# Thread: %s(%d)" % (names.get(tid, ""), tid))
        for fname, lno, func, src in traceback.extract_stack(frame):
            lines.append('File: "%s", line %d, in %s' % (fname, lno, func))
            if src:
                lines.append(" %s" % (src.strip()))
    worker.log.debug("\n".join(lines))
def worker_abort(worker):
    """Worker hook: invoked when a worker receives SIGABRT (typically on timeout)."""
    worker.log.info("Worker received SIGABRT signal")

@ -0,0 +1,60 @@
# nginx.conf for the Docker image: serves /static and /media straight from
# /opt/app/public and proxies everything else to gunicorn over a unix socket.
worker_processes 1;
user nginx;
pid /var/run/nginx.pid;
error_log /var/log/nginx/error.log;
events {
worker_connections 1024;
# accept_mutex off: fine with a single worker process
accept_mutex off;
use epoll;
}
http {
sendfile on;
include /etc/nginx/mime.types;
default_type application/octet-stream;
keepalive_timeout 65;
# Must match "bind" in conf/gunicorn_prod.py
upstream django {
server unix:/tmp/gunicorn.sock fail_timeout=0;
}
server {
# catch-all vhost on port 80
listen 80;
server_name _;
charset utf-8;
keepalive_timeout 5;
client_max_body_size 64M;
access_log /var/log/nginx/access.log;
# Static assets served directly with long cache headers
location /media {
alias /opt/app/public/media;
expires 30d;
}
location /static {
alias /opt/app/public/static;
expires 30d;
}
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
# long (600s) proxy timeouts for slow upstream responses
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
proxy_http_version 1.1;
proxy_pass http://django;
proxy_pass_header Server;
}
}
}

@ -1,70 +0,0 @@
# Stage vhost template: {{ project }}, {{ user }} and {{ port }} are
# substituted by the deploy tooling before this file is installed.
server {
    listen 80;
    server_name {{ project }} www.{{ project }};

    # The upstream TLS terminator sets X-Forwarded-Proto; force HTTPS here.
    if ($http_x_forwarded_proto = "http") {
        return 301 https://$server_name$request_uri;
    }

    # Set the charset
    charset utf-8;

    # Set the max size for file uploads to 10Mb
    client_max_body_size 10M;

    access_log /var/log/nginx/{{ project }}-access.log;
    error_log /var/log/nginx/{{ project }}-error.log;

    gzip on;
    gzip_disable "msie6";
    gzip_types
        text/plain
        application/atom+xml
        application/rss+xml
        application/vnd.ms-fontobject
        application/x-font-ttf
        application/x-web-app-manifest+json
        font/opentype
        image/x-icon
        application/xhtml+xml
        text/css
        application/json
        image/svg+xml
        application/x-javascript
        text/xml
        application/xml
        application/xml+rss
        text/javascript
        application/javascript
        text/x-component;

    location /static/ {
        root /home/{{ user }}/projects/{{ project }}/public;
        expires 30d;
        autoindex on;
    }

    location /media/ {
        root /home/{{ user }}/projects/{{ project }}/public;
        expires 30d;
        autoindex on;
    }

    location /robots.txt {
        alias /home/{{ user }}/projects/{{ project }}/src/templates/robots.txt;
    }

    location / {
        # FIX(review): removed the stray space in "127.0.0.1: {{ port }}" --
        # nginx rejects a proxy_pass URL split across tokens after rendering.
        proxy_pass http://127.0.0.1:{{ port }};
        proxy_set_header Host $server_name;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # NOTE(review): 32M here contradicts the 10M set at server level; the
        # location-level value wins for proxied uploads -- confirm which limit
        # is intended.
        client_max_body_size 32M;
        client_body_buffer_size 512k;
        proxy_connect_timeout 300;
        proxy_send_timeout 90;
        proxy_read_timeout 4000;
        proxy_buffers 32 4k;
    }
}

@ -0,0 +1,44 @@
; supervisor.conf — in-container process manager: runs nginx, gunicorn and the
; celery worker in the foreground (nodaemon=true keeps the container alive).
[supervisord]
logfile=/var/log/supervisord.log
loglevel=debug
directory=/opt/app
pidfile=/tmp/supervisord.pid
nodaemon=true
minfds=65535
minprocs=200
; NOTE(review): PATH reduced to "/opt/app" only — verify the gunicorn and
; celery executables are still resolvable for the child processes.
environment=PATH="/opt/app"
user=root

[program:nginx]
command=/usr/sbin/nginx "-g" "daemon off;"
priority=1
autorestart=true
stdout_events_enabled=true
stderr_events_enabled=true

[program:gunicorn]
command=gunicorn src.wsgi:application -c /opt/app/conf/gunicorn_prod.py
priority=2
stdout_logfile=/var/log/gunicorn.log
redirect_stderr=true
stdout_events_enabled=true
stderr_events_enabled=true
autorestart=true

[program:celeryd]
; FIX(review): "/opt/app/src/celery" is not an executable path; run the celery
; binary from the src directory, matching docker-compose's
; "cd src && celery -A dokumentor worker -l info -E".
directory=/opt/app/src
command=celery -A dokumentor worker -l info -E
autostart=true
autorestart=true
startsecs=10
; give long-running tasks time to finish on shutdown
stopwaitsecs=600
; FIX(review): the section previously set stdout_logfile twice
; (/var/log/celery-worker.log and /dev/stdout) and stderr_logfile alongside
; redirect_stderr=true; keep the single container-friendly destination.
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
; Celery performs a warm shutdown on TERM; if it does not exit within
; stopwaitsecs, supervisor will forcefully kill it.
stopsignal=TERM

@ -1,69 +0,0 @@
# Local development stack (this file is the one removed by the commit):
# Postgres + RabbitMQ + Redis, with the Django dev server and the Celery
# worker built from the same Dockerfile and the source mounted at /code.
# Indentation reconstructed — the capture had flattened all nesting.
version: '2'
services:
  db:
    restart: always
    image: postgres
    environment:
      POSTGRES_USER: 'dokumentor'
      POSTGRES_PASSWORD: 'dokumentor'
    volumes:
      - ./data:/var/lib/postgresql/data
  rabbitmq:
    restart: always
    image: rabbitmq:latest
    environment:
      RABBITMQ_DEFAULT_USER: 'dokumentor'
      RABBITMQ_DEFAULT_PASS: 'dokumentor'
    ports:
      - "8080:15672"
      - "5672:5672"
      - "5671:5671"
  celery:
    restart: always
    build:
      context: .
      dockerfile: Dockerfile
    command: bash -c 'cd src && celery -A dokumentor worker -l info -E'
    volumes:
      - .:/code
    env_file: conf/env
    environment:
      DJANGO_DB: 'postgres://dokumentor:dokumentor@db:5432/dokumentor'
      REDIS_URL: 'redis://redis:6379/1'
      CELERY_BROKER_URL: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
      CELERY_RESULT_BACKEND: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
    links:
      - db
      - rabbitmq
      - redis
  redis:
    restart: always
    image: redis:latest
    ports:
      - '6379:6379'
  web:
    restart: always
    build:
      context: .
      dockerfile: Dockerfile
    command: bash -c 'python src/manage.py runserver 0.0.0.0:8000'
    volumes:
      - .:/code
    entrypoint: conf/docker/entrypoint.sh
    ports:
      - "8000:8000"
    env_file: conf/env
    environment:
      DJANGO_DB: 'postgres://dokumentor:dokumentor@db:5432/dokumentor'
      REDIS_URL: 'redis://redis:6379/1'
      CELERY_BROKER_URL: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
      CELERY_RESULT_BACKEND: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
    depends_on:
      - db
      - rabbitmq
      - redis

@ -1,68 +1,43 @@
# NOTE(review): raw diff capture (hunk "@ -1,68 +1,43"): the OLD
# docker-compose.yml (celery service, /code mounts, "KEY: 'value'" environment
# maps) is interleaved with the NEW one (/opt/app mount, "KEY=value"
# environment lists, alpine images) without +/- markers. The duplicated keys
# below (two depends_on, two image: lines for redis, two expose entries) come
# from that interleaving, not from the real file.
version: '2'
services:
db:
restart: always
image: postgres
environment:
POSTGRES_USER: 'dokumentor'
POSTGRES_PASSWORD: 'dokumentor'
volumes:
- ./data:/var/lib/postgresql/data
rabbitmq:
restart: always
image: rabbitmq:latest
environment:
RABBITMQ_DEFAULT_USER: 'dokumentor'
RABBITMQ_DEFAULT_PASS: 'dokumentor'
expose:
- "5672"
- "5671"
celery:
restart: always
build:
context: .
dockerfile: Dockerfile
command: bash -c 'cd src && celery -A dokumentor worker -l info -E'
web:
build: .
volumes:
- .:/code
- ./:/opt/app
ports:
- "80:80"
env_file: conf/env
environment:
DJANGO_DB: 'postgres://dokumentor:dokumentor@db:5432/dokumentor'
REDIS_URL: 'redis://redis:6379/1'
CELERY_BROKER_URL: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
CELERY_RESULT_BACKEND: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
links:
- DJANGO_DB=postgres://dokumentor:dokumentor@db:5432/dokumentor
- REDIS_URL=redis://redis:6379/1
- CELERY_BROKER_URL=amqp://dokumentor:dokumentor@rabbitmq:5672//
- CELERY_RESULT_BACKEND=amqp://dokumentor:dokumentor@rabbitmq:5672//
depends_on:
depends_on:
- db
- rabbitmq
- redis
- rabbitmq
db:
image: postgres:latest
expose:
- "5432"
volumes:
- ./data:/var/lib/postgresql/data
environment:
- POSTGRES_USER=dokumentor
- POSTGRES_PASSWORD=dokumentor
redis:
restart: always
image: redis:latest
image: redis:alpine
expose:
- '6379'
- "6379"
web:
restart: always
build:
context: .
dockerfile: Dockerfile
command: bash -c 'python src/manage.py runserver 0.0.0.0:8000'
volumes:
- .:/code
entrypoint: conf/docker/entrypoint_production.sh
ports:
- "18001:8000"
env_file: conf/env
rabbitmq:
image: rabbitmq:alpine
environment:
DJANGO_DB: 'postgres://dokumentor:dokumentor@db:5432/dokumentor'
REDIS_URL: 'redis://redis:6379/1'
CELERY_BROKER_URL: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
CELERY_RESULT_BACKEND: 'amqp://dokumentor:dokumentor@rabbitmq:5672//'
depends_on:
- db
- rabbitmq
- redis
- RABBITMQ_DEFAULT_USER=dokumentor
- RABBITMQ_DEFAULT_PASS=dokumentor
expose:
- "5672"
- "5671"

Loading…
Cancel
Save