SWARM multi-node, multi-container compatible
The idea:
- every node generates its own config in /etc/nginx/node.conf.d/HOSTNAME.conf
- a Python script reads all of these configs, merges them into one, and reloads nginx (sketched below)
- via entr in the Procfile, the merge runs immediately whenever any of these configs changes
- the default nginx reload on notify is removed so nginx is not reloaded twice
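To make the flow concrete, here is a minimal sketch of the merge-and-reload step, with the paths taken from this commit (the committed implementation is mergeswarm.py further down, which slices crossplane's file list slightly differently and writes to /etc/nginx/conf.d/default):

# Sketch only: parse swarm.conf, which includes every per-node file, flatten the
# per-file directive lists while dropping duplicates, write one merged config,
# and reload nginx.
import subprocess

import crossplane

payload = crossplane.parse('/etc/nginx/node.conf.d/swarm.conf')

merged = []
for entry in payload['config'][1:]:        # entry 0 is swarm.conf itself
    for directive in entry['parsed']:      # one node's top-level directives
        if directive not in merged:        # identical blocks from several nodes collapse
            merged.append(directive)

with open('/etc/nginx/conf.d/default.conf', 'w') as out:
    out.write(crossplane.build(merged))

subprocess.run(['nginx', '-s', 'reload'])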
parent 99868d27f2
commit b395e3894e
6 changed files with 95 additions and 7 deletions
1  .gitignore (vendored)

@@ -1,3 +1,4 @@
 **/__pycache__/
 **/.cache/
 .idea/
+venv
18  Dockerfile

@@ -1,18 +1,26 @@
 FROM nginx:1.17.8
-LABEL maintainer="Jason Wilder mail@jasonwilder.com"
+MAINTAINER Jonathan Adami <contact@jadami.com>
+LABEL creator="Jason Wilder mail@jasonwilder.com"
 
-# Install wget and install/updates certificates
+# Install python with crossplane, wget, entr and install/updates certificates
 RUN apt-get update \
  && apt-get install -y -q --no-install-recommends \
    ca-certificates \
    wget \
+   entr \
+   python3 python3-pip \
+ && pip3 install crossplane \
+ && apt-get purge -y python3-pip \
+ && apt autoremove -y \
  && apt-get clean \
  && rm -r /var/lib/apt/lists/*
 
 # Configure Nginx and apply fix for very long server names
 RUN echo "daemon off;" >> /etc/nginx/nginx.conf \
- && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf
+ && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf \
+ && rm /etc/nginx/conf.d/default.conf \
+ && mkdir /etc/nginx/node.conf.d \
+ && echo "http { include ./*.conf; }" > /etc/nginx/node.conf.d/swarm.conf
 
 # Install Forego
 ADD https://github.com/jwilder/forego/releases/download/v0.16.1/forego /usr/local/bin/forego

@@ -31,7 +39,7 @@ WORKDIR /app/
 
 ENV DOCKER_HOST unix:///tmp/docker.sock
 
-VOLUME ["/etc/nginx/certs", "/etc/nginx/dhparam", "/etc/nginx/static_files"]
+VOLUME ["/etc/nginx/certs", "/etc/nginx/dhparam", "/etc/nginx/static_files", "/etc/nginx/node.conf.d"]
 
 ENTRYPOINT ["/app/docker-entrypoint.sh"]
 CMD ["forego", "start", "-r"]
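Net effect of the Dockerfile changes: the image now ships python3 with the crossplane package (pip itself is purged again to keep the image small) plus entr for file watching, drops nginx's stock conf.d/default.conf, and creates /etc/nginx/node.conf.d containing a swarm.conf whose include pulls in every per-node *.conf; that directory is also declared as a volume.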
3  Procfile

@@ -1,2 +1,3 @@
-dockergen: docker-gen -watch -notify "nginx -s reload" /app/nginx.tmpl /etc/nginx/conf.d/default.conf
+swarmmerge: while true; do ls -d /etc/nginx/node.conf.d/*.conf | entr -d python3 /app/mergeswarm.py; done
+dockergen: docker-gen -watch /app/nginx.tmpl /etc/nginx/node.conf.d/`hostname`.conf
 nginx: nginx
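docker-gen now writes its output to a file named after the container's hostname, so each node contributes its own /etc/nginx/node.conf.d/<hostname>.conf, and the -notify "nginx -s reload" flag is gone because reloading is now the merge script's job. The swarmmerge loop leans on entr's -d flag, which also watches the directories of the listed files and exits when a new file shows up there; the surrounding while true then restarts entr so the file list is re-evaluated and configs from newly joined nodes are picked up.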
28  mergeswarm.py (new file)

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

import subprocess
import sys
from crossplane import parse, build

SWARM_CONFIG_FILE = '/etc/nginx/node.conf.d/swarm.conf'
NGINX_OUTPUT = '/etc/nginx/conf.d/default'
NGINX_RELOAD = 'nginx -s reload'

nginx_config = []
swarm_config = parse(SWARM_CONFIG_FILE)['config']
nodes = [f['parsed'] for f in swarm_config[1:-1]]

for node in nodes:
    for statement in node:
        if statement not in nginx_config:
            nginx_config.append(statement)

with open(NGINX_OUTPUT, 'w') as f:
    f.write(build(nginx_config))

process = subprocess.Popen(NGINX_RELOAD.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if output:
    sys.stdout.write(output)
if error:
    sys.stderr.write(error)
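For context on the indexing above: crossplane.parse() walks include directives and returns one entry per file it reads, and each entry's 'parsed' list holds that file's top-level directives as plain dicts, which is why repeated blocks can be dropped with a simple membership test. A rough illustration of the payload shape, with hypothetical per-node files node-a.conf and node-b.conf and made-up directives:

# Illustrative payload only; the file names and directives are invented.
payload = {
    'status': 'ok',
    'errors': [],
    'config': [
        {'file': '/etc/nginx/node.conf.d/swarm.conf',   # the file passed to parse()
         'status': 'ok', 'errors': [],
         'parsed': [{'directive': 'http', 'line': 1, 'args': [],
                     'block': [{'directive': 'include', 'line': 1,
                                'args': ['./*.conf'], 'includes': [1, 2]}]}]},
        {'file': '/etc/nginx/node.conf.d/node-a.conf',  # written by docker-gen on node a
         'status': 'ok', 'errors': [],
         'parsed': [{'directive': 'upstream', 'line': 1,
                     'args': ['1.whoami.local'], 'block': []}]},
        {'file': '/etc/nginx/node.conf.d/node-b.conf',  # written by docker-gen on node b
         'status': 'ok', 'errors': [],
         'parsed': [{'directive': 'upstream', 'line': 1,
                     'args': ['1.whoami.local'], 'block': []}]},
    ],
}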
2  nginx.tmpl

@@ -13,7 +13,7 @@
 {{/* If there is no swarm node or the port is not published on host, use container's IP:PORT */}}
 {{ else if .Network }}
 # {{ .Container.Name }}
-server {{ .Network.IP }}:{{ .Address.Port }};
+server {{ (first (split .Container.Name ".")) }}:{{ .Address.Port }};
 {{ end }}
 {{ else if .Network }}
 # {{ .Container.Name }}

The upstream entry now uses the first dot-separated component of the container name, i.e. the swarm service name, which swarm DNS can resolve from any node, instead of a node-local container IP.
50  swarm.stack.yml (new file)

@@ -0,0 +1,50 @@
version: '3.8'

services:

  proxy:
    deploy:
      mode: global
    build:
      context: .
    image: test-proxy:1.0
    networks:
      - proxy
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - /tmp/proxyconf:/var/nginx/node.conf.d/

  whoami1:
    deploy:
      replicas: 1
    image: jwilder/whoami
    networks:
      - proxy
    environment:
      - VIRTUAL_HOST=1.whoami.local

  whoami2:
    deploy:
      replicas: 2
    image: jwilder/whoami
    networks:
      - proxy
    environment:
      - VIRTUAL_HOST=2.whoami.local

  whoami3:
    deploy:
      replicas: 3
    image: jwilder/whoami
    networks:
      - proxy
    environment:
      - VIRTUAL_HOST=3.whoami.local

networks:
  proxy:
    name: proxy
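The stack would be deployed with docker stack deploy, e.g. docker stack deploy -c swarm.stack.yml test (the stack name is arbitrary here); note that stack deploy ignores the build section, so test-proxy:1.0 has to be built, or pushed somewhere the nodes can pull it from, beforehand. Running the proxy in global mode puts exactly one proxy task on every node, so each node generates its own slice of the config, while the whoami services use different replica counts to spread containers across the cluster and exercise the merge.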