Switch to docker setup

sonny 2023-07-02 12:56:44 +02:00
parent 06e8243445
commit 5091fd5b60
28 changed files with 85 additions and 1036 deletions

View file

@@ -1,4 +0,0 @@
[defaults]
roles_path = ./roles
remote_user = ansible
inventory = ./inventory.yml

View file

@@ -1,34 +0,0 @@
- name: restart gunicorn socket
systemd:
daemon-reload: true
name: gunicorn.socket
state: restarted
enabled: true
- name: stop gunicorn service
systemd:
daemon-reload: true
name: gunicorn.service
state: stopped
enabled: false
- name: restart pgbouncer
systemd:
daemon-reload: true
name: pgbouncer
state: restarted
enabled: true
- name: restart celery
systemd:
daemon-reload: true
name: celery
state: restarted
enabled: true
- name: restart celerybeat
systemd:
daemon-reload: true
name: celerybeat
state: restarted
enabled: true

View file

@@ -1,3 +0,0 @@
newsreader:
hosts:
192.168.178.63:

View file

@@ -1,44 +1,9 @@
-- hosts: newsreader
-  become: true
-  become_method: sudo
-  pre_tasks:
-    - name: install packages
-      apt:
-        name: '{{ packages }}'
-        state: present
-      notify:
-        - restart postfix
-    - include_role:
-        name: common
-        tasks_from: 'network.yml'
-    - include_role:
-        name: common
-        tasks_from: 'host.yml'
-    - include_role:
-        name: common
-        tasks_from: 'sudoers.yml'
-      loop:
-        - { src: '../../templates/sudoers.j2', dest: '/etc/sudoers.d/30-ansible-extra' }
-  roles:
-    - common
+- hosts: localhost
   tasks:
-    - import_tasks: 'tasks/main.yml'
-    - include_role:
-        name: common
-        tasks_from: 'ssl.yml'
-    - include_role:
-        name: common
-        tasks_from: 'nginx.yml'
-    - import_tasks: 'tasks/setup.yml'
-    - import_tasks: 'tasks/poetry.yml'
-    - import_tasks: 'tasks/project.yml'
-  handlers:
-    - import_tasks: 'handlers.yml'
+    - import_tasks: 'tasks.yml'
   vars_files:
     - 'vars/app.yml'
     - 'vars/email.yml'
-    - 'vars/main.yml'
-    - 'vars/network.yml'
     - 'vars/postgres.yml'
     - 'vars/reddit.yml'
    - 'vars/twitter.yml'
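
With ansible.cfg, the inventory and the remote roles removed, the trimmed play above only targets the implicit localhost. Purely as a usage sketch (the playbook's filename is not visible in this diff, so deploy.yml below is an assumption), it would now be run directly on the Docker host with the vault password supplied interactively:

ansible-playbook deploy.yml --ask-vault-pass   # playbook filename assumed; hosts: localhost uses Ansible's implicit local connection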

View file

@@ -1,8 +0,0 @@
- src: git+https://git.fudiggity.nl/ansible/common.git
name: common
version: master
scm: git
- src: git+https://git.fudiggity.nl/ansible/npm.git
name: npm
version: master
scm: git

tasks.yml (new file, 44 additions)
View file

@@ -0,0 +1,44 @@
- name: create sites directory
  become: true
  file:
    path: '/srv/docker'
    state: directory
    owner: root
    group: root
    mode: 0755

- name: create newsreader app directory
  become: true
  file:
    path: '{{ app_dir }}'
    state: directory
    mode: '0755'
    owner: 'sonny'
    group: 'sonny'

- name: clone project
  git:
    repo: '{{ app_repository }}'
    dest: '{{ app_dir }}'
    version: '{{ app_branch }}'
    update: true

- name: copy .production.env
  template:
    src: 'templates/env.j2'
    dest: '{{ app_dir }}/.production.env'

- name: stop newsreader
  docker_compose:
    project_src: '{{ app_dir }}'
    state: absent

- name: start newsreader
  docker_compose:
    build: true
    state: present
    project_src: '{{ app_dir }}'
    env_file: '{{ app_dir }}/.production.env'
    files:
      - docker-compose.yml
      - docker-compose.production.yml
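
The docker-compose.yml and docker-compose.production.yml referenced above ship with the application repository and are not part of this commit. Purely as a hedged sketch of what the production overlay might contain, consistent with postgres_host: 'db' and NGINX_HTTP_PORT introduced elsewhere in this change (service names, images and volumes are assumptions, not taken from the repo), with the rendered .production.env presumably supplying the ${...} values:

# Hypothetical docker-compose.production.yml overlay -- not part of this commit;
# service names, images and ports are assumptions based on the variables above.
version: '3.7'

services:
  web:
    build: .
    env_file: .production.env
    depends_on:
      - db

  db:
    # service name matches postgres_host: 'db' in vars/postgres.yml
    image: postgres:11
    environment:
      POSTGRES_DB: newsreader
      POSTGRES_USER: newsreader
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    volumes:
      - pgdata:/var/lib/postgresql/data

  nginx:
    image: nginx:stable
    ports:
      - '${NGINX_HTTP_PORT}:80'
    depends_on:
      - web

volumes:
  pgdata: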

View file

@@ -1,126 +0,0 @@
- name: copy firewall templates
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
owner: root
group: root
mode: '0600'
loop:
- { src: 'templates/nftables.j2', dest: '/etc/nftables.conf' }
notify: restart nftables
- name: copy memcached conf
template:
src: 'templates/memcached.j2'
dest: '/etc/memcached.conf'
owner: root
group: root
mode: '0644'
notify: restart memcached
- name: add gitlab to known hosts
become_user: '{{ app_user }}'
known_hosts:
name: '{{ gitlab_domain }}'
key: '{{ gitlab_host_key }}'
- name: add gitlab pubkey
authorized_key:
user: ansible
state: present
key: '{{ app_deploy_key }}'
- name: Add newsreader user
user:
name: '{{ app_user }}'
create_home: yes
shell: /bin/bash
- name: create ssh dir
file:
path: '/home/{{ app_user }}/.ssh'
state: directory
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: 0755
- name: create rabbitmq service override dir
file:
path: /etc/systemd/system/rabbitmq-server.service.d/
state: directory
mode: '0644'
- name: copy rabbitmq configurations
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
owner: '{{ item.owner }}'
group: '{{ item.group }}'
mode: '{{ item.mode }}'
loop:
- {
src: 'templates/limits.j2',
dest: '/etc/systemd/system/rabbitmq-server.service.d/limits.conf',
mode: '0644',
group: 'root',
owner: 'root',
}
- {
src: 'rabbitmq.conf.j2',
dest: '/etc/rabbitmq/rabbitmq-env.conf',
mode: '0644',
group: 'rabbitmq',
owner: 'rabbitmq',
}
notify: restart rabbitmq
- name: copy nginx config
template:
src: 'templates/nginx.j2'
dest: '/etc/nginx/sites-available/newsreader'
owner: root
group: root
mode: '0644'
- name: link nginx config
file:
src: '/etc/nginx/sites-available/newsreader'
dest: '/etc/nginx/sites-enabled/newsreader'
owner: root
group: root
mode: '0777'
state: link
- name: copy nftables config
template:
src: 'templates/nftables.j2'
dest: '/etc/nftables.conf'
owner: root
group: root
mode: '0600'
notify: restart nftables
- name: copy pgbouncer config
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
owner: postgres
group: postgres
mode: '{{ item.mode }}'
loop:
- {
src: 'templates/pgbouncer.j2',
dest: '/etc/pgbouncer/pgbouncer.ini',
'mode': '0640',
}
- {
src: 'templates/pgbouncer-users.j2',
dest: '/etc/pgbouncer/userlist.txt',
'mode': '0640',
}
- name: ensure pgbouncer is restarted
systemd:
name: pgbouncer
state: restarted
enabled: true

View file

@@ -1,32 +0,0 @@
- name: include poetry tasks
include_role:
name: common
tasks_from: 'poetry.yml'
vars:
poetry_user: '{{ app_user }}'
poetry_dir: '/home/{{ app_user }}/.poetry'
- name: retrieve user $PATH
shell: 'echo $PATH'
become_user: '{{ app_user }}'
register: path_stats
- name: set poetry user variables
set_fact:
poetry_user_path: '{{ path_stats.stdout }}'
- name: set default venv python version
become_user: '{{ app_user }}'
command: 'poetry env use python3.7'
args:
chdir: '{{ app_dir }}'
environment:
PATH: '/home/{{ app_user }}/.local/bin:{{ poetry_user_path }}'
- name: install project dependencies
become_user: '{{ app_user }}'
command: 'poetry install --extras "sentry"'
args:
chdir: '{{ app_dir }}'
environment:
PATH: '/home/{{ app_user }}/.local/bin:{{ poetry_user_path }}'

View file

@@ -1,101 +0,0 @@
- name: install npm packages
become_user: '{{ app_user }}'
command: /usr/bin/npm install
args:
chdir: '{{ app_dir }}'
- name: build static files
become_user: '{{ app_user }}'
command: /usr/bin/npm run build:prod
args:
chdir: '{{ app_dir }}'
- name: run migrations
become_user: '{{ app_user }}'
django_manage:
command: migrate
app_path: '{{ app_dir }}/src/'
virtualenv: '{{ app_dir }}/.venv'
settings: 'newsreader.conf.production'
- name: collect static files
become_user: '{{ app_user }}'
django_manage:
command: collectstatic
app_path: '{{ app_dir }}/src/'
virtualenv: '{{ app_dir }}/.venv'
settings: 'newsreader.conf.production'
- name: setup env file
template:
src: 'templates/env.j2'
dest: '{{ app_dir }}/.env'
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: 0600
- name: setup gunicorn service
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
owner: root
group: root
mode: '{{ item.mode }}'
loop:
- {
src: 'templates/gunicorn-socket.j2',
dest: '/etc/systemd/system/gunicorn.socket',
'mode': '0644',
}
- {
src: 'templates/gunicorn.j2',
dest: '/etc/systemd/system/gunicorn.service',
'mode': '0644',
}
notify:
- restart gunicorn socket
- stop gunicorn service
- name: create conf dir
become_user: '{{ app_user }}'
file:
path: '/home/{{ app_user }}/.config/conf.d'
state: directory
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: 0750
- name: create celery run dir
file:
path: /run/celery
state: directory
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: 0755
- name: copy celery config
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: '{{ item.mode }}'
loop:
- {
src: 'templates/celery.j2',
dest: '/etc/systemd/system/celery.service',
'mode': '0644',
}
- {
src: 'templates/celerybeat.j2',
dest: '/etc/systemd/system/celerybeat.service',
'mode': '0644',
}
- {
src: 'templates/celery.env.j2',
dest: '/home/newsreader/.config/conf.d/celery',
'mode': '0640',
}
notify:
- restart celery
- restart celerybeat

View file

@@ -1,24 +0,0 @@
- name: create sites dir
file:
path: '/srv/sites'
state: directory
owner: root
group: root
mode: 0755
- name: create project dir
file:
path: '{{ app_dir }}'
state: directory
owner: '{{ app_user }}'
group: '{{ app_user }}'
mode: 0755
- name: clone project
become_user: '{{ app_user }}'
git:
repo: 'https://git.fudiggity.nl/sonny/newsreader.git'
dest: '{{ app_dir }}'
version: '{{ app_branch }}'
update: true
force: true

View file

@@ -1,30 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# Name of nodes to start
CELERYD_NODES="worker1 worker2"
CELERY_BIN="{{ app_dir }}/.venv/bin/celery"
CELERY_APP="newsreader"
# The scheduler to be used.
# See https://docs.celeryproject.org/en/stable/userguide/configuration.html#beat-scheduler
CELERY_SCHEDULER="django_celery_beat.schedulers:DatabaseScheduler"
# How to call manage.py
CELERYD_MULTI="multi"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
# - %I will be replaced with the current child process index
# and is important when using the prefork pool to avoid race conditions.
CELERYD_PID_FILE="/run/celery/%n.pid"
CELERYD_LOG_LEVEL="INFO"
CELERYD_LOG_FILE="/dev/null"
# you may wish to add these options for Celery Beat
CELERYBEAT_PID_FILE="/run/celery/beat.pid"
DJANGO_SETTINGS_MODULE="newsreader.conf.production"

View file

@@ -1,25 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Celery Service
After=systemd-networkd-wait-online.service
[Service]
Type=forking
User=newsreader
Group=newsreader
SyslogIdentifier=celery
EnvironmentFile=/home/newsreader/.config/conf.d/celery
RuntimeDirectory=celery
WorkingDirectory={{ app_dir }}/src
ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE} ${CELERYD_OPTS}'
[Install]
WantedBy=multi-user.target

View file

@@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Celery Beat Service
After=celery.service
[Service]
Type=simple
User=newsreader
Group=newsreader
EnvironmentFile=/home/newsreader/.config/conf.d/celery
RuntimeDirectory=celery
WorkingDirectory={{ app_dir }}/src
ExecStart=/bin/sh -c '${CELERY_BIN} beat \
-A ${CELERY_APP} -S ${CELERY_SCHEDULER} --pidfile=${CELERYBEAT_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE}'
[Install]
WantedBy=multi-user.target

View file

@ -1,27 +1,30 @@
POSTGRES_HOST="{{ pgbouncer_listen_address }}" POSTGRES_HOST='{{ postgres_host }}'
POSTGRES_PORT="{{ pgbouncer_port }}" POSTGRES_PORT='{{ postgres_port }}'
POSTGRES_NAME="{{ pgbouncer_name }}" POSTGRES_DB='{{ postgres_db }}'
POSTGRES_USER="{{ pgbouncer_user }}" POSTGRES_USER='{{ postgres_user }}'
POSTGRES_PASSWORD="{{ pgbouncer_password }}" POSTGRES_PASSWORD='{{ postgres_password }}'
DJANGO_SETTINGS_MODULE="{{ django_settings_module }}" DJANGO_SETTINGS_MODULE='{{ django_settings_module }}'
DJANGO_SECRET_KEY="{{ django_secret_key }}" DJANGO_SECRET_KEY='{{ django_secret_key }}'
EMAIL_HOST="{{ email_host }}" NGINX_HTTP_PORT='{{ nginx_http_port }}'
EMAIL_PORT="{{ email_port }}"
EMAIL_HOST_USER="{{ email_user }}" # TODO: setup email configuration
EMAIL_HOST_PASSWORD="{{ email_password }}" EMAIL_HOST='{{ email_host }}'
EMAIL_PORT='{{ email_port }}'
EMAIL_HOST_USER='{{ email_user }}'
EMAIL_HOST_PASSWORD='{{ email_password }}'
EMAIL_USE_TLS={{ email_tls }} EMAIL_USE_TLS={{ email_tls }}
EMAIL_USE_SSL={{ email_ssl }} EMAIL_USE_SSL={{ email_ssl }}
EMAIL_DEFAULT_FROM="{{ email_user }}" EMAIL_DEFAULT_FROM='{{ email_user }}'
REDDIT_CLIENT_ID="{{ reddit_client_id }}" REDDIT_CLIENT_ID='{{ reddit_client_id }}'
REDDIT_CLIENT_SECRET="{{ reddit_client_secret }}" REDDIT_CLIENT_SECRET='{{ reddit_client_secret }}'
REDDIT_CALLBACK_URL="{{ reddit_callback_url }}" REDDIT_CALLBACK_URL='{{ reddit_callback_url }}'
TWITTER_CONSUMER_ID="{{ twitter_client_id }}" TWITTER_CONSUMER_ID='{{ twitter_client_id }}'
TWITTER_CONSUMER_SECRET="{{ twitter_client_secret }}" TWITTER_CONSUMER_SECRET='{{ twitter_client_secret }}'
TWITTER_REDIRECT_URL="{{ twitter_redirect_url }}" TWITTER_REDIRECT_URL='{{ twitter_redirect_url }}'
SENTRY_DSN="{{ sentry_dsn }}" SENTRY_DSN='{{ sentry_dsn }}'
ADMINS="{{ admins }}" ADMINS='{{ admins }}'

View file

@@ -1,11 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Gunicorn socket
[Socket]
ListenStream=/run/gunicorn.sock
User=www-data
[Install]
WantedBy=sockets.target

View file

@@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Gunicorn daemon
Requires=gunicorn.socket
After=network.target
[Service]
User=newsreader
Group=www-data
EnvironmentFile={{ app_dir }}/.env
WorkingDirectory={{ app_dir }}/src
ExecStart={{ app_dir }}/.venv/bin/gunicorn \
--workers 3 \
--bind unix:/run/gunicorn.sock \
newsreader.wsgi:application
[Install]
WantedBy=multi-user.target

View file

@@ -1,6 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# see https://www.rabbitmq.com/install-debian.html#kernel-resource-limits
#
[Service]
LimitNOFILE=64000

View file

@@ -1,52 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# memcached default config file
# 2003 - Jay Bonci <jaybonci@debian.org>
# This configuration file is read by the start-memcached script provided as
# part of the Debian GNU/Linux distribution.
# Run memcached as a daemon. This command is implied, and is not needed for the
# daemon to run. See the README.Debian that comes with this package for more
# information.
-d
# Log memcached's output to /var/log/memcached
logfile /var/log/memcached.log
# Be verbose
# -v
# Be even more verbose (print client commands as well)
# -vv
# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
# Note that the daemon will grow to this size, but does not start out holding this much
# memory
-m 64
# Default connection port is 11211
-p 11211
# Run the daemon as root. The start-memcached will default to running as root if no
# -u command is present in this config file
-u memcache
# Specify which IP address to listen on. The default is to listen on all IP addresses
# This parameter is one of the only security measures that memcached has, so make sure
# it's listening on a firewalled interface.
-l 127.0.0.1
# Limit the number of simultaneous incoming connections. The daemon default is 1024
# -c 1024
# Lock down all paged memory. Consult with the README and homepage before you do this
# -k
# Return error when memory is exhausted (rather than removing items)
# -M
# Maximize core file limit
# -r
# Use a pidfile
-P /var/run/memcached/memcached.pid

View file

@@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# vim:set ts=2 sw=2 et:
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0; policy drop;
# accept any localhost traffic
iif lo accept
# accept traffic originated from us
ct state { established, related } accept
tcp dport { 22, 80, 443 } accept
}
}

View file

@@ -1,30 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
server {
listen 80;
server_name {{ hostname }};
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl;
server_name {{ hostname }};
ssl_certificate /etc/ssl/{{ app_name }}/{{ app_name }}.crt;
ssl_certificate_key /etc/ssl/{{ app_name }}/local.pem;
access_log /var/log/nginx/{{ app_name }}.log;
error_log /var/log/nginx/{{ app_name }}.log;
location /static/ {
root /srv/sites/newsreader;
}
location / {
include proxy_params;
proxy_redirect off;
proxy_pass http://unix:/run/gunicorn.sock;
}
}

View file

@@ -1 +0,0 @@
"{{ pgbouncer_user }}" "{{ pgbouncer_password }}"

View file

@@ -1,359 +0,0 @@
;; {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
;;
;; database name = connect string
;;
;; connect string params:
;; dbname= host= port= user= password= auth_user=
;; client_encoding= datestyle= timezone=
;; pool_size= reserve_pool= max_db_connections=
;; pool_mode= connect_query= application_name=
[databases]
newsreader = host={{ postgres_host }} port={{ postgres_port }} dbname={{ postgres_db }} user={{ postgres_user }} password={{ postgres_password }}
;; foodb over Unix socket
;foodb =
;; redirect bardb to bazdb on localhost
;bardb = host=localhost dbname=bazdb
;; access to dest database will go with single user
;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1'
;; use custom pool sizes
;nondefaultdb = pool_size=50 reserve_pool=10
;; use auth_user with auth_query if user not present in auth_file
;; auth_user must exist in auth_file
; foodb = auth_user=bar
;; fallback connect string
;* = host=testserver
;; User-specific configuration
[users]
;user1 = pool_mode=transaction max_user_connections=10
;; Configuration section
[pgbouncer]
;;;
;;; Administrative settings
;;;
logfile = /var/log/postgresql/pgbouncer.log
pidfile = /var/run/postgresql/pgbouncer.pid
;;;
;;; Where to wait for clients
;;;
;; IP address or * which means all IPs
listen_addr = {{ pgbouncer_listen_address }}
listen_port = {{ pgbouncer_port }}
;; Unix socket is also used for -R.
;; On Debian it should be /var/run/postgresql
;unix_socket_dir = /tmp
;unix_socket_mode = 0777
;unix_socket_group =
unix_socket_dir = /var/run/postgresql
;;;
;;; TLS settings for accepting clients
;;;
;; disable, allow, require, verify-ca, verify-full
;client_tls_sslmode = disable
;; Path to file that contains trusted CA certs
;client_tls_ca_file = <system default>
;; Private key and cert to present to clients.
;; Required for accepting TLS connections from clients.
;client_tls_key_file =
;client_tls_cert_file =
;; fast, normal, secure, legacy, <ciphersuite string>
;client_tls_ciphers = fast
;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3
;client_tls_protocols = secure
;; none, auto, legacy
;client_tls_dheparams = auto
;; none, auto, <curve name>
;client_tls_ecdhcurve = auto
;;;
;;; TLS settings for connecting to backend databases
;;;
;; disable, allow, require, verify-ca, verify-full
server_tls_sslmode = require
;; Path to that contains trusted CA certs
;server_tls_ca_file = <system default>
;; Private key and cert to present to backend.
;; Needed only if backend server require client cert.
;server_tls_key_file =
;server_tls_cert_file =
;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3
;server_tls_protocols = secure
;; fast, normal, secure, legacy, <ciphersuite string>
;server_tls_ciphers = fast
;;;
;;; Authentication settings
;;;
;; any, trust, plain, md5, cert, hba, pam
auth_type = trust
auth_file = /etc/pgbouncer/userlist.txt
;; Path to HBA-style auth config
;auth_hba_file =
;; Query to use to fetch password from database. Result
;; must have 2 columns - username and password hash.
;auth_query = SELECT usename, passwd FROM pg_shadow WHERE usename=$1
;;;
;;; Users allowed into database 'pgbouncer'
;;;
;; comma-separated list of users who are allowed to change settings
;admin_users = user2, someadmin, otheradmin
;; comma-separated list of users who are just allowed to use SHOW command
;stats_users = stats, root
;;;
;;; Pooler personality questions
;;;
;; When server connection is released back to pool:
;; session - after client disconnects (default)
;; transaction - after transaction finishes
;; statement - after statement finishes
;pool_mode = session
;; Query for cleaning connection immediately after releasing from
;; client. No need to put ROLLBACK here, pgbouncer does not reuse
;; connections where transaction is left open.
;server_reset_query = DISCARD ALL
;; Whether server_reset_query should run in all pooling modes. If it
;; is off, server_reset_query is used only for session-pooling.
;server_reset_query_always = 0
;; Comma-separated list of parameters to ignore when given in startup
;; packet. Newer JDBC versions require the extra_float_digits here.
;ignore_startup_parameters = extra_float_digits
;; When taking idle server into use, this query is run first.
;server_check_query = select 1
;; If server was used more recently that this many seconds ago,
; skip the check query. Value 0 may or may not run in immediately.
;server_check_delay = 30
;; Close servers in session pooling mode after a RECONNECT, RELOAD,
;; etc. when they are idle instead of at the end of the session.
;server_fast_close = 0
;; Use <appname - host> as application_name on server.
;application_name_add_host = 0
;; Period for updating aggregated stats.
;stats_period = 60
;;;
;;; Connection limits
;;;
;; Total number of clients that can connect
;max_client_conn = 100
;; Default pool size. 20 is good number when transaction pooling
;; is in use, in session pooling it needs to be the number of
;; max clients you want to handle at any moment
;default_pool_size = 20
;; Minimum number of server connections to keep in pool.
;min_pool_size = 0
; how many additional connection to allow in case of trouble
;reserve_pool_size = 0
;; If a clients needs to wait more than this many seconds, use reserve
;; pool.
;reserve_pool_timeout = 5
;; Maximum number of server connections for a database
;max_db_connections = 0
;; Maximum number of server connections for a user
;max_user_connections = 0
;; If off, then server connections are reused in LIFO manner
;server_round_robin = 0
;;;
;;; Logging
;;;
;; Syslog settings
;syslog = 0
;syslog_facility = daemon
;syslog_ident = pgbouncer
;; log if client connects or server connection is made
;log_connections = 1
;; log if and why connection was closed
;log_disconnections = 1
;; log error messages pooler sends to clients
;log_pooler_errors = 1
;; write aggregated stats into log
;log_stats = 1
;; Logging verbosity. Same as -v switch on command line.
;verbose = 0
;;;
;;; Timeouts
;;;
;; Close server connection if its been connected longer.
;server_lifetime = 3600
;; Close server connection if its not been used in this time. Allows
;; to clean unnecessary connections from pool after peak.
;server_idle_timeout = 600
;; Cancel connection attempt if server does not answer takes longer.
;server_connect_timeout = 15
;; If server login failed (server_connect_timeout or auth failure)
;; then wait this many second.
;server_login_retry = 15
;; Dangerous. Server connection is closed if query does not return in
;; this time. Should be used to survive network problems, _not_ as
;; statement_timeout. (default: 0)
;query_timeout = 0
;; Dangerous. Client connection is closed if the query is not
;; assigned to a server in this time. Should be used to limit the
;; number of queued queries in case of a database or network
;; failure. (default: 120)
;query_wait_timeout = 120
;; Dangerous. Client connection is closed if no activity in this
;; time. Should be used to survive network problems. (default: 0)
;client_idle_timeout = 0
;; Disconnect clients who have not managed to log in after connecting
;; in this many seconds.
;client_login_timeout = 60
;; Clean automatically created database entries (via "*") if they stay
;; unused in this many seconds.
; autodb_idle_timeout = 3600
;; Close connections which are in "IDLE in transaction" state longer
;; than this many seconds.
;idle_transaction_timeout = 0
;; How long SUSPEND/-R waits for buffer flush before closing
;; connection.
;suspend_timeout = 10
;;;
;;; Low-level tuning options
;;;
;; buffer for streaming packets
;pkt_buf = 4096
;; man 2 listen
;listen_backlog = 128
;; Max number pkt_buf to process in one event loop.
;sbuf_loopcnt = 5
;; Maximum PostgreSQL protocol packet size.
;max_packet_size = 2147483647
;; Set SO_REUSEPORT socket option
;so_reuseport = 0
;; networking options, for info: man 7 tcp
;; Linux: Notify program about new connection only if there is also
;; data received. (Seconds to wait.) On Linux the default is 45, on
;; other OS'es 0.
;tcp_defer_accept = 0
;; In-kernel buffer size (Linux default: 4096)
;tcp_socket_buffer = 0
;; whether tcp keepalive should be turned on (0/1)
;tcp_keepalive = 1
;; The following options are Linux-specific. They also require
;; tcp_keepalive=1.
;; Count of keepalive packets
;tcp_keepcnt = 0
;; How long the connection can be idle before sending keepalive
;; packets
;tcp_keepidle = 0
;; The time between individual keepalive probes
;tcp_keepintvl = 0
;; How long may transmitted data remain unacknowledged before TCP
;; connection is closed (in milliseconds)
;tcp_user_timeout = 0
;; DNS lookup caching time
;dns_max_ttl = 15
;; DNS zone SOA lookup period
;dns_zone_check_period = 0
;; DNS negative result caching time
;dns_nxdomain_ttl = 15
;; Custom resolv.conf file, to set custom DNS servers or other options
;; (default: empty = use OS settings)
;resolv_conf = /etc/pgbouncer/resolv.conf
;;;
;;; Random stuff
;;;
;; Hackish security feature. Helps against SQL injection: when PQexec
;; is disabled, multi-statement cannot be made.
;disable_pqexec = 0
;; Config file to use for next RELOAD/SIGHUP
;; By default contains config file from command line.
;conffile
;; Windows service name to register as. job_name is alias for
;; service_name, used by some Skytools scripts.
;service_name = pgbouncer
;job_name = pgbouncer
;; Read additional config from other file
;%include /etc/pgbouncer/pgbouncer-other.ini

View file

@@ -1,18 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# Defaults to rabbit. This can be useful if you want to run more than one node
# per machine - RABBITMQ_NODENAME should be unique per erlang-node-and-machine
# combination. See the clustering on a single machine guide for details:
# http://www.rabbitmq.com/clustering.html#single-machine
#NODENAME=rabbit
# By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
# available. Set this if you only want to bind to one network interface or
# address family.
#NODE_IP_ADDRESS=127.0.0.1
# Defaults to 5672.
#NODE_PORT=5672
# Fix rabbitmq name resolution
HOSTNAME=localhost

View file

@@ -1,3 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
ansible ALL = (newsreader:newsreader) NOPASSWD: ALL

View file

@@ -1,8 +1,7 @@
 app_name: 'newsreader'
-app_user: 'newsreader'
-app_branch: 'master'
-app_dir: '/srv/sites/newsreader'
-app_deploy_key: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICq4U2MKiufVeM8fVzpfoA/rhiWTMnrJr9usAZAG6kfb Key for deploying to newsreader app'
+app_repository: 'https://git.fudiggity.nl/sonny/newsreader'
+app_branch: '0.4.2'
+app_dir: '/srv/docker/newsreader'
 django_settings_module: 'newsreader.conf.production'
 django_secret_key: !vault |
@@ -15,4 +14,16 @@ django_secret_key: !vault |
   35626539383762383462646632616334633737623035643034643433623237323932373334316639
   356533316361653939303165313766633666
+sentry_dsn: !vault |
+  $ANSIBLE_VAULT;1.1;AES256
+  62663633303937303836306431396435343838626439373733353464366266383364343365333735
+  3539306162396230633664663839333637303733323461620a376531366464646239366436343136
+  30643031653363616265363938633536386134343766656239376638643335396565326330613832
+  6639396631623334350a353037386433313166313736666161663964313932636462383461343463
+  64636433373533313933343461393638663638623430366332336265343061663030313031353665
+  63393062396534643934623964323437316238353535623261323531383230633536626333326262
+  656365343030643332303534343535336566
+nginx_http_port: 5000
 admins: ''

View file

@@ -1,31 +0,0 @@
default_user: 'sonny'
packages:
- memcached
- pgbouncer
- postfix
- python-psycopg2
- python3-psycopg2
- python3
- python3-pip
- python3-venv
- python3-setuptools
- python3-virtualenv
- python-pip
- python-setuptools
- python-virtualenv
- rabbitmq-server
- rustc # for building cryptography
gitlab_host_key: 'git.fudiggity.nl ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNmvcxza79T7JZMkifmquwXH/kMUqDnKs9Oob+JrRvn'
gitlab_domain: 'git.fudiggity.nl'
sentry_dsn: !vault |
$ANSIBLE_VAULT;1.1;AES256
62663633303937303836306431396435343838626439373733353464366266383364343365333735
3539306162396230633664663839333637303733323461620a376531366464646239366436343136
30643031653363616265363938633536386134343766656239376638643335396565326330613832
6639396631623334350a353037386433313166313736666161663964313932636462383461343463
64636433373533313933343461393638663638623430366332336265343061663030313031353665
63393062396534643934623964323437316238353535623261323531383230633536626333326262
656365343030643332303534343535336566

View file

@@ -1,6 +0,0 @@
hostname: 'rss.fudiggity.nl'
host_interface: 'en*'
host_ip: '192.168.178.63'
host_subnet: '24'
host_gateway: '192.168.178.1'
host_dns: '192.168.178.1'

View file

@@ -1,4 +1,4 @@
-postgres_host: '192.168.178.165'
+postgres_host: 'db'
 postgres_port: '5432'
 postgres_db: 'newsreader'
 postgres_user: 'newsreader'
@@ -10,16 +10,3 @@ postgres_password: !vault |
   3236326566393034650a613133623062333862623931353131663731623764393632643639363434
   38623566363265373230386535303332363564393234636561663761353235303132373865353530
   6138663238346363383737633133383638383962386236343565
-pgbouncer_listen_address: '127.0.0.1'
-pgbouncer_port: '6432'
-pgbouncer_name: 'newsreader'
-pgbouncer_user: 'newsreader'
-pgbouncer_password: !vault |
-  $ANSIBLE_VAULT;1.1;AES256
-  38613333336663643735396637373664363432343633653161633463373536653964656266356564
-  3732356130316365323466626232373835623266363433370a313732613535303864356266303462
-  35333164343062623362353566336439326661633135366238313235626162343165653930383562
-  6431666462643064310a656230623365616334613139363033626463623063313065633462663061
-  34303265643662363034653230323939313664623364633532626266646662643130346665653733
-  6138366237333461343561353333663761303039383261356535