Compare commits

...
This repository was archived on 2025-04-27. You can view files and clone it, but you cannot make any changes to its state, such as pushing or creating new issues, pull requests, or comments.

21 commits

Author SHA1 Message Date
8ec9bbb77e Update sentry DSN
This is the DSN for GlitchTip, which is Sentry-compatible
2025-01-24 20:59:43 +01:00
385859ec1c Update project repo URL 2025-01-24 08:26:28 +01:00
184c97b2c9 Update restart policy 2025-01-24 08:24:27 +01:00
6614afbc29 Update version 2024-10-14 12:29:05 +02:00
2f93d2580b Increase app server memory 2024-10-09 09:56:48 +02:00
27c2c9e1d6 Add docker-compose override & use ansible command module 2024-10-08 10:01:23 +02:00
23e73ceb60 Update to 0.5.0 2024-10-06 21:17:22 +02:00
73784bb21f Update version 2023-09-28 20:41:02 +02:00
5091fd5b60 Switch to docker setup 2023-07-02 12:56:44 +02:00
06e8243445 Merge branch 'master' of git.fudiggity.nl:ansible/newsreader 2022-05-15 21:14:43 +02:00
c89ada5182 Update CI job 2022-05-15 21:14:36 +02:00
ac4d84ba3f Merge branch 'master' of git.fudiggity.nl:ansible/newsreader 2021-11-06 20:24:27 +01:00
7e5192b83f Update pgbouncer config to bullseye's 2021-11-06 20:24:23 +01:00
2e226ebdf4 Re-run formatting 2021-04-24 13:29:03 +02:00
dcb8efd539 Include missing var file 2021-04-23 23:12:51 +02:00
2b77c15ce4 Add email settings 2021-04-23 22:56:46 +02:00
5df35b0988 Add new package dependency 2021-02-18 22:32:49 +01:00
b1f1ceefb5 Move tasks to playbook/pre_tasks 2021-01-30 17:10:27 +01:00
996d22ac69 Move requirements file 2021-01-30 10:29:49 +01:00
a7ec74c8fc Remove ansible lint file 2021-01-28 21:37:40 +01:00
d71a28e1c8 Move to simpler file structure 2021-01-28 21:32:10 +01:00
42 changed files with 258 additions and 1112 deletions


@ -1,5 +0,0 @@
parseable: true
quiet: true
skip_list:
- '501'
use_default_rules: true

2 .gitignore vendored

@ -12,3 +12,5 @@ node_modules/
 .vaults/
 vault
 vaults/
+roles/


@ -3,40 +3,24 @@ stages:
   - test
 cache:
-  key: "$CI_COMMIT_REF_SLUG"
+  key: $CI_COMMIT_REF_SLUG
   paths:
     - .cache/pip
     - node_modules/
 lint:
-  stage: lint
-  image: python:3.7
-  before_script:
-    - pip install ansible ansible-lint --quiet
-  script:
-    - ansible-lint playbook.yml
-  only:
-    refs:
-      - development
-      - merge_requests
-pretty-lint:
   stage: lint
   image: node:12
   before_script:
     - npm install
   script:
-    - npx prettier "**/*.yml" --check
-  only:
-    refs:
-      - development
-      - merge_requests
+    - npx prettier '**/*.yml' --check
 syntax-test:
   stage: test
   image: python:3.7
   before_script:
-    - pip install ansible ansible-lint --quiet
-    - ansible-galaxy install -r roles/requirements.yml
+    - pip install ansible --quiet
+    - ansible-galaxy install --role-file requirements.yml
   script:
     - ansible-playbook playbook.yml --syntax-check


@ -1,9 +0,0 @@
{
"singleQuote": true,
"printWidth": 90,
"tabWidth": 2,
"useTabs": false,
"bracketSpacing": true,
"parser": "yaml"
}

5 .prettierrc.yml Normal file

@ -0,0 +1,5 @@
singleQuote: true
printWidth: 90
tabWidth: 2
useTabs: false
bracketSpacing: true


@ -1,4 +0,0 @@
[defaults]
roles_path = ./roles
remote_user = ansible
inventory = ./inventory.yml


@ -1,3 +0,0 @@
newsreader:
hosts:
192.168.178.63:

33 package-lock.json generated Normal file

@ -0,0 +1,33 @@
{
"name": "development",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"dependencies": {
"prettier": "^2.6.2"
}
},
"node_modules/prettier": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-2.6.2.tgz",
"integrity": "sha512-PkUpF+qoXTqhOeWL9fu7As8LXsIUZ1WYaJiY/a7McAQzxjk82OF0tibkFXVCDImZtWxbvojFjerkiLb0/q8mew==",
"bin": {
"prettier": "bin-prettier.js"
},
"engines": {
"node": ">=10.13.0"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
}
},
"dependencies": {
"prettier": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-2.6.2.tgz",
"integrity": "sha512-PkUpF+qoXTqhOeWL9fu7As8LXsIUZ1WYaJiY/a7McAQzxjk82OF0tibkFXVCDImZtWxbvojFjerkiLb0/q8mew=="
}
}
}

5 package.json Normal file

@ -0,0 +1,5 @@
{
"dependencies": {
"prettier": "^2.6.2"
}
}


@ -1,5 +1,9 @@
-- hosts: newsreader
-  become: yes
-  become_method: sudo
-  roles:
-    - newsreader
+- hosts: localhost
+  tasks:
+    - import_tasks: 'tasks.yml'
+  vars_files:
+    - 'vars/app.yml'
+    - 'vars/email.yml'
+    - 'vars/postgres.yml'
+    - 'vars/reddit.yml'
+    - 'vars/twitter.yml'

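The reworked play above targets localhost and pulls its secrets from the vault-encrypted files under vars/. As an illustrative sketch (not part of this diff), assuming the Ansible Vault password is supplied interactively, the playbook could be run with:

    ansible-playbook playbook.yml --ask-vault-pass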
6 roles/.gitignore vendored

@ -1,6 +0,0 @@
# ignore all external roles and files in the roles dir
/*
!.gitignore
!requirements.yml
!newsreader*/


@ -1,18 +0,0 @@
app_name: "newsreader"
app_user: "newsreader"
app_branch: "master"
app_dir: "/srv/sites/newsreader"
app_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICq4U2MKiufVeM8fVzpfoA/rhiWTMnrJr9usAZAG6kfb Key for deploying to newsreader app"
django_settings_module: "newsreader.conf.production"
django_secret_key: !vault |
$ANSIBLE_VAULT;1.1;AES256
37383464313435333061393165373731303161343236666138313566333631303839393163313038
3934316466383964313962373865393164393164363332330a326333313631636132313033376230
66653634666463393061383731303661643662653036316332663039396164363432386137336135
3339336563316434330a376233333762656162323139336535366136633866626532376662663635
62656331306464363637393164633535393339613834383036646262326539393638393532633038
35626539383762383462646632616334633737623035643034643433623237323932373334316639
356533316361653939303165313766633666
admins: ""


@ -1,14 +0,0 @@
default_user: "sonny"
gitlab_host_key: "git.fudiggity.nl ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICNmvcxza79T7JZMkifmquwXH/kMUqDnKs9Oob+JrRvn"
gitlab_domain: "git.fudiggity.nl"
sentry_dsn: !vault |
$ANSIBLE_VAULT;1.1;AES256
62663633303937303836306431396435343838626439373733353464366266383364343365333735
3539306162396230633664663839333637303733323461620a376531366464646239366436343136
30643031653363616265363938633536386134343766656239376638643335396565326330613832
6639396631623334350a353037386433313166313736666161663964313932636462383461343463
64636433373533313933343461393638663638623430366332336265343061663030313031353665
63393062396534643934623964323437316238353535623261323531383230633536626333326262
656365343030643332303534343535336566


@ -1,6 +0,0 @@
hostname: "rss.fudiggity.nl"
host_interface: "en*"
host_ip: "192.168.178.63"
host_subnet: "24"
host_gateway: "192.168.178.1"
host_dns: "192.168.178.1"


@ -1,25 +0,0 @@
postgres_host: "192.168.178.165"
postgres_port: "5432"
postgres_db: "newsreader"
postgres_user: "newsreader"
postgres_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
32613132353864633832306363626432343063616433343830623064326166653231313334636463
6631343261613137363864336435313664653738383232330a356163633637393433313532303563
62356162303438323139616338316130373364383331663437336662356432623136396364396539
3236326566393034650a613133623062333862623931353131663731623764393632643639363434
38623566363265373230386535303332363564393234636561663761353235303132373865353530
6138663238346363383737633133383638383962386236343565
pgbouncer_listen_address: "127.0.0.1"
pgbouncer_port: "6432"
pgbouncer_name: "newsreader"
pgbouncer_user: "newsreader"
pgbouncer_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
38613333336663643735396637373664363432343633653161633463373536653964656266356564
3732356130316365323466626232373835623266363433370a313732613535303864356266303462
35333164343062623362353566336439326661633135366238313235626162343165653930383562
6431666462643064310a656230623365616334613139363033626463623063313065633462663061
34303265643662363034653230323939313664623364633532626266646662643130346665653733
6138366237333461343561353333663761303039383261356535


@ -1,34 +0,0 @@
- name: restart gunicorn socket
systemd:
daemon-reload: true
name: gunicorn.socket
state: restarted
enabled: true
- name: stop gunicorn service
systemd:
daemon-reload: true
name: gunicorn.service
state: stopped
enabled: false
- name: restart pgbouncer
systemd:
daemon-reload: true
name: pgbouncer
state: restarted
enabled: true
- name: restart celery
systemd:
daemon-reload: true
name: celery
state: restarted
enabled: true
- name: restart celerybeat
systemd:
daemon-reload: true
name: celerybeat
state: restarted
enabled: true


@ -1,17 +0,0 @@
dependencies:
- common
- npm
galaxy_info:
author: sonny
description: "Newsreader installation"
license: "license GPLv3"
min_ansible_version: 2.7
issue_tracker_url: "https://git.fudiggity.nl/sonny/ansible-playbooks/-/issues"
platforms:
- name: Debian
versions:
- buster
galaxy_tags:
- development
- web


@ -1,171 +0,0 @@
- include_role:
name: common
tasks_from: "network.yml"
- include_role:
name: common
tasks_from: "host.yml"
- include_role:
name: common
tasks_from: "sudoers.yml"
loop:
- {
src: "../newsreader/templates/sudoers.j2",
dest: "/etc/sudoers.d/30-ansible-extra",
}
- name: install packages
apt:
name:
- memcached
- pgbouncer
- postfix
- python-psycopg2
- python3-psycopg2
- python3
- python3-pip
- python3-venv
- python3-setuptools
- python3-virtualenv
- python-pip
- python-setuptools
- python-virtualenv
- rabbitmq-server
state: present
notify:
- restart postfix
- name: copy firewall templates
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: "0600"
with_items:
- { src: "nftables.j2", dest: "/etc/nftables.conf" }
notify: restart nftables
- name: copy memcached conf
template:
src: "memcached.j2"
dest: "/etc/memcached.conf"
owner: root
group: root
mode: "0644"
notify: restart memcached
- name: add gitlab to known hosts
become_user: "{{ app_user }}"
known_hosts:
name: "{{ gitlab_domain }}"
key: "{{ gitlab_host_key }}"
- name: add gitlab pubkey
authorized_key:
user: ansible
state: present
key: "{{ app_deploy_key }}"
- name: Add newsreader user
user:
name: "{{ app_user }}"
create_home: yes
shell: /bin/bash
- name: create ssh dir
file:
path: "/home/{{ app_user }}/.ssh"
state: directory
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: 0755
- name: create rabbitmq service override dir
file:
path: /etc/systemd/system/rabbitmq-server.service.d/
state: directory
mode: "0644"
- name: copy rabbitmq configurations
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
loop:
- {
src: "limits.j2",
dest: "/etc/systemd/system/rabbitmq-server.service.d/limits.conf",
mode: "0644",
group: "root",
owner: "root",
}
- {
src: "rabbitmq.conf.j2",
dest: "/etc/rabbitmq/rabbitmq-env.conf",
mode: "0644",
group: "rabbitmq",
owner: "rabbitmq",
}
notify: restart rabbitmq
- include_role:
name: common
tasks_from: "ssl.yml"
- include_role:
name: common
tasks_from: "nginx.yml"
- name: copy nginx config
template:
src: "nginx.j2"
dest: "/etc/nginx/sites-available/newsreader"
owner: root
group: root
mode: "0644"
- name: link nginx config
file:
src: "/etc/nginx/sites-available/newsreader"
dest: "/etc/nginx/sites-enabled/newsreader"
owner: root
group: root
mode: "0777"
state: link
- name: copy nftables config
template:
src: "nftables.j2"
dest: "/etc/nftables.conf"
owner: root
group: root
mode: "0600"
notify: restart nftables
- name: copy pgbouncer config
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: postgres
group: postgres
mode: "{{ item.mode }}"
loop:
- {
src: "pgbouncer.j2",
dest: "/etc/pgbouncer/pgbouncer.ini",
"mode": "0640",
}
- {
src: "pgbouncer-users.j2",
dest: "/etc/pgbouncer/userlist.txt",
"mode": "0640",
}
- name: ensure pgbouncer is restarted
systemd:
name: pgbouncer
state: restarted
enabled: true
- include_tasks: "project.yml"


@ -1,161 +0,0 @@
- name: create sites dir
file:
path: /srv/sites
state: directory
owner: root
group: root
mode: 0755
- name: create project dir
file:
path: "{{ app_dir }}"
state: directory
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: 0755
- name: clone project
become_user: "{{ app_user }}"
git:
repo: "https://git.fudiggity.nl/sonny/newsreader.git"
dest: "{{ app_dir }}"
version: "{{ app_branch }}"
update: true
force: true
- name: install npm packages
become_user: "{{ app_user }}"
command: /usr/bin/npm install
args:
chdir: "{{ app_dir }}"
- name: build static files
become_user: "{{ app_user }}"
command: /usr/bin/npm run build:prod
args:
chdir: "{{ app_dir }}"
- name: run migrations
become_user: "{{ app_user }}"
django_manage:
command: migrate
app_path: "{{ app_dir }}/src/"
virtualenv: "{{ app_dir }}/.venv"
settings: "newsreader.conf.production"
- name: collect static files
become_user: "{{ app_user }}"
django_manage:
command: collectstatic
app_path: "{{ app_dir }}/src/"
virtualenv: "{{ app_dir }}/.venv"
settings: "newsreader.conf.production"
- name: include poetry tasks
include_role:
name: common
tasks_from: "poetry.yml"
vars:
poetry_user: "{{ app_user }}"
poetry_dir: "/home/{{ app_user }}/.poetry"
- name: run poetry tasks
block:
- name: retrieve user $PATH
shell: "echo $PATH"
become_user: "{{ app_user }}"
register: path_stats
- name: set poetry user variables
set_fact:
poetry_user_path: "{{ path_stats.stdout }}"
- name: set default venv python version
become_user: "{{ app_user }}"
command: "poetry env use python3.7"
args:
chdir: "{{ app_dir }}"
environment:
PATH: "/home/{{ app_user }}/.local/bin:{{ poetry_user_path }}"
- name: install project dependencies
become_user: "{{ app_user }}"
command: 'poetry install --extras "sentry"' # noqa 301
args:
chdir: "{{ app_dir }}"
environment:
PATH: "/home/{{ app_user }}/.local/bin:{{ poetry_user_path }}"
- name: setup env file
template:
src: "env.j2"
dest: "{{ app_dir }}/.env"
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: 0600
- name: setup gunicorn service
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: "{{ item.mode }}"
loop:
- {
src: "gunicorn-socket.j2",
dest: "/etc/systemd/system/gunicorn.socket",
"mode": "0644",
}
- {
src: "gunicorn.j2",
dest: "/etc/systemd/system/gunicorn.service",
"mode": "0644",
}
notify:
- restart gunicorn socket
- stop gunicorn service
- name: create conf dir
become_user: "{{ app_user }}"
file:
path: "/home/{{ app_user }}/.config/conf.d"
state: directory
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: 0750
- name: create celery run dir
file:
path: /run/celery
state: directory
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: 0755
- name: copy celery config
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: "{{ app_user }}"
group: "{{ app_user }}"
mode: "{{ item.mode }}"
loop:
- {
src: "celery.j2",
dest: "/etc/systemd/system/celery.service",
"mode": "0644",
}
- {
src: "celerybeat.j2",
dest: "/etc/systemd/system/celerybeat.service",
"mode": "0644",
}
- {
src: "celery.env.j2",
dest: "/home/newsreader/.config/conf.d/celery",
"mode": "0640",
}
notify:
- restart celery
- restart celerybeat


@ -1,30 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# Name of nodes to start
CELERYD_NODES="worker1 worker2"
CELERY_BIN="{{ app_dir }}/.venv/bin/celery"
CELERY_APP="newsreader"
# The scheduler to be used.
# See https://docs.celeryproject.org/en/stable/userguide/configuration.html#beat-scheduler
CELERY_SCHEDULER="django_celery_beat.schedulers:DatabaseScheduler"
# How to call manage.py
CELERYD_MULTI="multi"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
# - %I will be replaced with the current child process index
# and is important when using the prefork pool to avoid race conditions.
CELERYD_PID_FILE="/run/celery/%n.pid"
CELERYD_LOG_LEVEL="INFO"
CELERYD_LOG_FILE="/dev/null"
# you may wish to add these options for Celery Beat
CELERYBEAT_PID_FILE="/run/celery/beat.pid"
DJANGO_SETTINGS_MODULE="newsreader.conf.production"


@ -1,25 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Celery Service
After=systemd-networkd-wait-online.service
[Service]
Type=forking
User=newsreader
Group=newsreader
SyslogIdentifier=celery
EnvironmentFile=/home/newsreader/.config/conf.d/celery
RuntimeDirectory=celery
WorkingDirectory={{ app_dir }}/src
ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE} ${CELERYD_OPTS}'
[Install]
WantedBy=multi-user.target


@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Celery Beat Service
After=celery.service
[Service]
Type=simple
User=newsreader
Group=newsreader
EnvironmentFile=/home/newsreader/.config/conf.d/celery
RuntimeDirectory=celery
WorkingDirectory={{ app_dir }}/src
ExecStart=/bin/sh -c '${CELERY_BIN} beat \
-A ${CELERY_APP} -S ${CELERY_SCHEDULER} --pidfile=${CELERYBEAT_PID_FILE} \
--loglevel=${CELERYD_LOG_LEVEL} --logfile=${CELERYD_LOG_FILE}'
[Install]
WantedBy=multi-user.target


@ -1,19 +0,0 @@
POSTGRES_HOST="{{ pgbouncer_listen_address }}"
POSTGRES_PORT="{{ pgbouncer_port }}"
POSTGRES_NAME="{{ pgbouncer_name }}"
POSTGRES_USER="{{ pgbouncer_user }}"
POSTGRES_PASSWORD="{{ pgbouncer_password }}"
DJANGO_SETTINGS_MODULE="{{ django_settings_module }}"
DJANGO_SECRET_KEY="{{ django_secret_key }}"
REDDIT_CLIENT_ID="{{ reddit_client_id }}"
REDDIT_CLIENT_SECRET="{{ reddit_client_secret }}"
REDDIT_CALLBACK_URL="{{ reddit_callback_url }}"
TWITTER_CONSUMER_ID="{{ twitter_client_id }}"
TWITTER_CONSUMER_SECRET="{{ twitter_client_secret }}"
TWITTER_REDIRECT_URL="{{ twitter_redirect_url }}"
SENTRY_DSN="{{ sentry_dsn }}"
ADMINS="{{ admins }}"


@ -1,11 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Gunicorn socket
[Socket]
ListenStream=/run/gunicorn.sock
User=www-data
[Install]
WantedBy=sockets.target


@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
[Unit]
Description=Gunicorn daemon
Requires=gunicorn.socket
After=network.target
[Service]
User=newsreader
Group=www-data
EnvironmentFile={{ app_dir }}/.env
WorkingDirectory={{ app_dir }}/src
ExecStart={{ app_dir }}/.venv/bin/gunicorn \
--workers 3 \
--bind unix:/run/gunicorn.sock \
newsreader.wsgi:application
[Install]
WantedBy=multi-user.target


@ -1,6 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# see https://www.rabbitmq.com/install-debian.html#kernel-resource-limits
#
[Service]
LimitNOFILE=64000


@ -1,52 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# memcached default config file
# 2003 - Jay Bonci <jaybonci@debian.org>
# This configuration file is read by the start-memcached script provided as
# part of the Debian GNU/Linux distribution.
# Run memcached as a daemon. This command is implied, and is not needed for the
# daemon to run. See the README.Debian that comes with this package for more
# information.
-d
# Log memcached's output to /var/log/memcached
logfile /var/log/memcached.log
# Be verbose
# -v
# Be even more verbose (print client commands as well)
# -vv
# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
# Note that the daemon will grow to this size, but does not start out holding this much
# memory
-m 64
# Default connection port is 11211
-p 11211
# Run the daemon as root. The start-memcached will default to running as root if no
# -u command is present in this config file
-u memcache
# Specify which IP address to listen on. The default is to listen on all IP addresses
# This parameter is one of the only security measures that memcached has, so make sure
# it's listening on a firewalled interface.
-l 127.0.0.1
# Limit the number of simultaneous incoming connections. The daemon default is 1024
# -c 1024
# Lock down all paged memory. Consult with the README and homepage before you do this
# -k
# Return error when memory is exhausted (rather than removing items)
# -M
# Maximize core file limit
# -r
# Use a pidfile
-P /var/run/memcached/memcached.pid


@ -1,19 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# vim:set ts=2 sw=2 et:
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0; policy drop;
# accept any localhost traffic
iif lo accept
# accept traffic originated from us
ct state { established, related } accept
tcp dport { 22, 80, 443 } accept
}
}


@ -1,30 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
server {
listen 80;
server_name {{ hostname }};
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl;
server_name {{ hostname }};
ssl_certificate /etc/ssl/{{ app_name }}/{{ app_name }}.crt;
ssl_certificate_key /etc/ssl/{{ app_name }}/local.pem;
access_log /var/log/nginx/{{ app_name }}.log;
error_log /var/log/nginx/{{ app_name }}.log;
location /static/ {
root /srv/sites/newsreader;
}
location / {
include proxy_params;
proxy_redirect off;
proxy_pass http://unix:/run/gunicorn.sock;
}
}


@ -1 +0,0 @@
"{{ pgbouncer_user }}" "{{ pgbouncer_password }}"


@ -1,352 +0,0 @@
;; {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
;;
;; database name = connect string
;;
;; connect string params:
;; dbname= host= port= user= password=
;; client_encoding= datestyle= timezone=
;; pool_size= connect_query=
;; auth_user=
[databases]
newsreader = host={{ postgres_host }} port={{ postgres_port }} dbname={{ postgres_db }} user={{ postgres_user }} password={{ postgres_password }}
; foodb over Unix socket
;foodb =
; redirect bardb to bazdb on localhost
;bardb = host=localhost dbname=bazdb
; access to dest database will go with single user
;forcedb = host=127.0.0.1 port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1'
; use custom pool sizes
;nondefaultdb = pool_size=50 reserve_pool=10
; use auth_user with auth_query if user not present in auth_file
; auth_user must exist in auth_file
; foodb = auth_user=bar
; fallback connect string
;* = host=testserver
;; Configuration section
[pgbouncer]
;;;
;;; Administrative settings
;;;
logfile = /var/log/postgresql/pgbouncer.log
pidfile = /var/run/postgresql/pgbouncer.pid
;;;
;;; Where to wait for clients
;;;
; IP address or * which means all IPs
listen_addr = {{ pgbouncer_listen_address }}
listen_port = {{ pgbouncer_port }}
; Unix socket is also used for -R.
; On Debian it should be /var/run/postgresql
;unix_socket_dir = /tmp
;unix_socket_mode = 0777
;unix_socket_group =
unix_socket_dir = /var/run/postgresql
;;;
;;; TLS settings for accepting clients
;;;
;; disable, allow, require, verify-ca, verify-full
;client_tls_sslmode = disable
;; Path to file that contains trusted CA certs
;client_tls_ca_file = <system default>
;; Private key and cert to present to clients.
;; Required for accepting TLS connections from clients.
;client_tls_key_file =
;client_tls_cert_file =
;; fast, normal, secure, legacy, <ciphersuite string>
;client_tls_ciphers = fast
;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2
;client_tls_protocols = all
;; none, auto, legacy
;client_tls_dheparams = auto
;; none, auto, <curve name>
;client_tls_ecdhcurve = auto
;;;
;;; TLS settings for connecting to backend databases
;;;
;; disable, allow, require, verify-ca, verify-full
server_tls_sslmode = require
;; Path to that contains trusted CA certs
;server_tls_ca_file = <system default>
;; Private key and cert to present to backend.
;; Needed only if backend server require client cert.
;server_tls_key_file =
;server_tls_cert_file =
;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2
server_tls_protocols = secure
;; fast, normal, secure, legacy, <ciphersuite string>
;server_tls_ciphers = fast
;;;
;;; Authentication settings
;;;
; any, trust, plain, crypt, md5, cert, hba, pam
auth_type = trust
auth_file = /etc/pgbouncer/userlist.txt
;; Path to HBA-style auth config
;auth_hba_file =
;; Query to use to fetch password from database. Result
;; must have 2 columns - username and password hash.
;auth_query = SELECT usename, passwd FROM pg_shadow WHERE usename=$1
;;;
;;; Users allowed into database 'pgbouncer'
;;;
; comma-separated list of users, who are allowed to change settings
;admin_users = user2, someadmin, otheradmin
; comma-separated list of users who are just allowed to use SHOW command
;stats_users = stats, root
;;;
;;; Pooler personality questions
;;;
; When server connection is released back to pool:
; session - after client disconnects
; transaction - after transaction finishes
; statement - after statement finishes
pool_mode = session
;
; Query for cleaning connection immediately after releasing from client.
; No need to put ROLLBACK here, pgbouncer does not reuse connections
; where transaction is left open.
;
; Query for 8.3+:
; DISCARD ALL;
;
; Older versions:
; RESET ALL; SET SESSION AUTHORIZATION DEFAULT
;
; Empty if transaction pooling is in use.
;
server_reset_query = DISCARD ALL
; Whether server_reset_query should run in all pooling modes.
; If it is off, server_reset_query is used only for session-pooling.
;server_reset_query_always = 0
;
; Comma-separated list of parameters to ignore when given
; in startup packet. Newer JDBC versions require the
; extra_float_digits here.
;
;ignore_startup_parameters = extra_float_digits
;
; When taking idle server into use, this query is ran first.
; SELECT 1
;
;server_check_query = select 1
; If server was used more recently that this many seconds ago,
; skip the check query. Value 0 may or may not run in immediately.
;server_check_delay = 30
; Close servers in session pooling mode after a RECONNECT, RELOAD,
; etc. when they are idle instead of at the end of the session.
;server_fast_close = 0
;; Use <appname - host> as application_name on server.
;application_name_add_host = 0
;;;
;;; Connection limits
;;;
; total number of clients that can connect
max_client_conn = 100
; default pool size. 20 is good number when transaction pooling
; is in use, in session pooling it needs to be the number of
; max clients you want to handle at any moment
default_pool_size = 20
;; Minimum number of server connections to keep in pool.
;min_pool_size = 0
; how many additional connection to allow in case of trouble
;reserve_pool_size = 0
; if a clients needs to wait more than this many seconds, use reserve pool
;reserve_pool_timeout = 5
; how many total connections to a single database to allow from all pools
;max_db_connections = 0
;max_user_connections = 0
; If off, then server connections are reused in LIFO manner
;server_round_robin = 0
;;;
;;; Logging
;;;
;; Syslog settings
;syslog = 0
;syslog_facility = daemon
;syslog_ident = pgbouncer
; log if client connects or server connection is made
;log_connections = 1
; log if and why connection was closed
;log_disconnections = 1
; log error messages pooler sends to clients
;log_pooler_errors = 1
;; Period for writing aggregated stats into log.
;stats_period = 60
;; Logging verbosity. Same as -v switch on command line.
;verbose = 0
;;;
;;; Timeouts
;;;
;; Close server connection if its been connected longer.
;server_lifetime = 3600
;; Close server connection if its not been used in this time.
;; Allows to clean unnecessary connections from pool after peak.
;server_idle_timeout = 600
;; Cancel connection attempt if server does not answer takes longer.
;server_connect_timeout = 15
;; If server login failed (server_connect_timeout or auth failure)
;; then wait this many second.
;server_login_retry = 15
;; Dangerous. Server connection is closed if query does not return
;; in this time. Should be used to survive network problems,
;; _not_ as statement_timeout. (default: 0)
;query_timeout = 0
;; Dangerous. Client connection is closed if the query is not assigned
;; to a server in this time. Should be used to limit the number of queued
;; queries in case of a database or network failure. (default: 120)
;query_wait_timeout = 120
;; Dangerous. Client connection is closed if no activity in this time.
;; Should be used to survive network problems. (default: 0)
;client_idle_timeout = 0
;; Disconnect clients who have not managed to log in after connecting
;; in this many seconds.
;client_login_timeout = 60
;; Clean automatically created database entries (via "*") if they
;; stay unused in this many seconds.
; autodb_idle_timeout = 3600
;; How long SUSPEND/-R waits for buffer flush before closing connection.
;suspend_timeout = 10
;; Close connections which are in "IDLE in transaction" state longer than
;; this many seconds.
;idle_transaction_timeout = 0
;;;
;;; Low-level tuning options
;;;
;; buffer for streaming packets
;pkt_buf = 4096
;; man 2 listen
;listen_backlog = 128
;; Max number pkt_buf to process in one event loop.
;sbuf_loopcnt = 5
;; Maximum PostgreSQL protocol packet size.
;max_packet_size = 2147483647
;; networking options, for info: man 7 tcp
;; Linux: notify program about new connection only if there
;; is also data received. (Seconds to wait.)
;; On Linux the default is 45, on other OS'es 0.
;tcp_defer_accept = 0
;; In-kernel buffer size (Linux default: 4096)
;tcp_socket_buffer = 0
;; whether tcp keepalive should be turned on (0/1)
;tcp_keepalive = 1
;; The following options are Linux-specific.
;; They also require tcp_keepalive=1.
;; count of keepalive packets
;tcp_keepcnt = 0
;; how long the connection can be idle,
;; before sending keepalive packets
;tcp_keepidle = 0
;; The time between individual keepalive probes.
;tcp_keepintvl = 0
;; DNS lookup caching time
;dns_max_ttl = 15
;; DNS zone SOA lookup period
;dns_zone_check_period = 0
;; DNS negative result caching time
;dns_nxdomain_ttl = 15
;;;
;;; Random stuff
;;;
;; Hackish security feature. Helps against SQL-injection - when PQexec is disabled,
;; multi-statement cannot be made.
;disable_pqexec = 0
;; Config file to use for next RELOAD/SIGHUP.
;; By default contains config file from command line.
;conffile
;; Win32 service name to register as. job_name is alias for service_name,
;; used by some Skytools scripts.
;service_name = pgbouncer
;job_name = pgbouncer
;; Read additional config from the /etc/pgbouncer/pgbouncer-other.ini file
;%include /etc/pgbouncer/pgbouncer-other.ini


@ -1,18 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
# Defaults to rabbit. This can be useful if you want to run more than one node
# per machine - RABBITMQ_NODENAME should be unique per erlang-node-and-machine
# combination. See the clustering on a single machine guide for details:
# http://www.rabbitmq.com/clustering.html#single-machine
#NODENAME=rabbit
# By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
# available. Set this if you only want to bind to one network interface or#
# address family.
#NODE_IP_ADDRESS=127.0.0.1
# Defaults to 5672.
#NODE_PORT=5672
# Fix rabbitmq name resolution
HOSTNAME=localhost


@ -1,3 +0,0 @@
# {{ ansible_managed }} {{ ansible_date_time.time }} {{ ansible_date_time.date }}
#
ansible ALL = (newsreader:newsreader) NOPASSWD: ALL


@ -1,8 +0,0 @@
- src: git+https://git.fudiggity.nl/ansible/common.git
name: common
version: master
scm: git
- src: git+https://git.fudiggity.nl/ansible/npm.git
name: npm
version: master
scm: git

73 tasks.yml Normal file

@ -0,0 +1,73 @@
- name: create sites directory
become: true
file:
path: '/srv/docker'
state: directory
owner: root
group: root
mode: 0755
- name: create newsreader app directory
become: true
file:
path: '{{ app_dir }}'
state: directory
mode: '0755'
owner: 'sonny'
group: 'sonny'
- name: clone project
git:
repo: '{{ app_repository }}'
dest: '{{ app_dir }}'
version: '{{ app_ref }}'
update: true
- name: copy templates
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
loop:
- { src: 'templates/env.j2', dest: '{{ app_dir }}/.production.env' }
- {
src: 'templates/docker-compose.j2',
dest: '{{ app_dir }}/docker-compose.resources.yml'
}
- name: stop newsreader
command: >
docker compose
--env-file .production.env
--file docker-compose.yml
--file docker-compose.production.yml
--file docker-compose.resources.yml
down
args:
chdir: '{{ app_dir }}'
# to allow next startup to generate a new volume
- name: remove static volume
command: docker volume rm newsreader_static-files
- name: build newsreader
command: >
docker compose
--env-file .production.env
--file docker-compose.yml
--file docker-compose.production.yml
--file docker-compose.resources.yml
build
args:
chdir: '{{ app_dir }}'
- name: start newsreader
command: >
docker compose
--env-file .production.env
--file docker-compose.yml
--file docker-compose.production.yml
--file docker-compose.resources.yml
up
--detach
args:
chdir: '{{ app_dir }}'


@ -0,0 +1,37 @@
# {{ ansible_managed }}
x-web-resources: &web-resources
deploy:
resources:
limits:
cpus: '2'
memory: 2GB
services:
db:
restart: always
deploy:
resources:
limits:
cpus: '4'
rabbitmq:
restart: always
memcached:
restart: always
celery:
<<: *web-resources
restart: always
django:
<<: *web-resources
deploy:
resources:
limits:
memory: 4GB
restart: always
nginx:
restart: always

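The override template above uses a YAML anchor and merge key: <<: *web-resources pulls the anchored deploy limits into a service. Merge keys are shallow, so a service that also declares its own deploy mapping (as django does) replaces the anchored one rather than combining with it. Assuming standard YAML merge-key semantics, the django service effectively resolves to the following (illustrative only, not part of the diff):

    django:
      deploy:
        resources:
          limits:
            memory: 4GB
      restart: always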
32 templates/env.j2 Normal file

@ -0,0 +1,32 @@
POSTGRES_HOST='{{ postgres_host }}'
POSTGRES_PORT='{{ postgres_port }}'
POSTGRES_DB='{{ postgres_db }}'
POSTGRES_USER='{{ postgres_user }}'
POSTGRES_PASSWORD='{{ postgres_password }}'
DJANGO_SETTINGS_MODULE='{{ django_settings_module }}'
DJANGO_SECRET_KEY='{{ django_secret_key }}'
NGINX_HTTP_PORT='{{ nginx_http_port }}'
# TODO: setup email configuration
EMAIL_HOST='{{ email_host }}'
EMAIL_PORT='{{ email_port }}'
EMAIL_HOST_USER='{{ email_user }}'
EMAIL_HOST_PASSWORD='{{ email_password }}'
EMAIL_USE_TLS={{ email_tls }}
EMAIL_USE_SSL={{ email_ssl }}
EMAIL_DEFAULT_FROM='{{ email_user }}'
REDDIT_CLIENT_ID='{{ reddit_client_id }}'
REDDIT_CLIENT_SECRET='{{ reddit_client_secret }}'
REDDIT_CALLBACK_URL='{{ reddit_callback_url }}'
TWITTER_CONSUMER_ID='{{ twitter_client_id }}'
TWITTER_CONSUMER_SECRET='{{ twitter_client_secret }}'
TWITTER_REDIRECT_URL='{{ twitter_redirect_url }}'
VERSION='{{ app_ref }}'
SENTRY_DSN='{{ sentry_dsn }}'
ADMINS='{{ admins }}'

30 vars/app.yml Normal file

@ -0,0 +1,30 @@
app_name: 'newsreader'
app_repository: 'https://forgejo.fudiggity.nl/sonny/newsreader'
app_ref: '0.5.3'
app_dir: '/srv/docker/newsreader'
django_settings_module: 'newsreader.conf.production'
django_secret_key: !vault |
$ANSIBLE_VAULT;1.1;AES256
37383464313435333061393165373731303161343236666138313566333631303839393163313038
3934316466383964313962373865393164393164363332330a326333313631636132313033376230
66653634666463393061383731303661643662653036316332663039396164363432386137336135
3339336563316434330a376233333762656162323139336535366136633866626532376662663635
62656331306464363637393164633535393339613834383036646262326539393638393532633038
35626539383762383462646632616334633737623035643034643433623237323932373334316639
356533316361653939303165313766633666
sentry_dsn: !vault |
$ANSIBLE_VAULT;1.1;AES256
37613964323436313965306364383537373437633262363036663939616235343932646238393432
3832363731633330363963616333333730633335653161330a646462303032636637386236306666
31383839663136643031613363326261633332376165323332333136656134623838343832373333
3064303236663131650a303964303962373839366532376231346366363739363934636433643139
36323635343034373232613839623137326430613033353037626430373939376138663063643065
37323533376239376261363236666333653862663663653739383936306539633635343233353266
63643331383861326634356164393732363532663539303561373230346535643936363036353665
30336132306236633062
nginx_http_port: 5000
admins: ''

14 vars/email.yml Normal file

@ -0,0 +1,14 @@
email_host: 'smtp.transip.email'
email_port: 465
email_user: 'services@fudiggity.nl'
email_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
37316661346236386438646338633536623266323432386330383037353932366531643736333462
3264326231626231663233333731636134653135343730650a393666333766353261653661323436
66393963646435626163626464383136373239373235653065383265306264393961616230366632
3734386465653235340a656538313935316565623263363764383536326363313361626665313365
37653136393464636433303866326564336365356538613036656338343938373535346437613233
32656531366135636634633939346364373331646532616139663165636637333333393639613033
636565306164653538653330613432323136
email_tls: ''
email_ssl: 'true'

12 vars/postgres.yml Normal file

@ -0,0 +1,12 @@
postgres_host: 'db'
postgres_port: '5432'
postgres_db: 'newsreader'
postgres_user: 'newsreader'
postgres_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
32613132353864633832306363626432343063616433343830623064326166653231313334636463
6631343261613137363864336435313664653738383232330a356163633637393433313532303563
62356162303438323139616338316130373364383331663437336662356432623136396364396539
3236326566393034650a613133623062333862623931353131663731623764393632643639363434
38623566363265373230386535303332363564393234636561663761353235303132373865353530
6138663238346363383737633133383638383962386236343565


@ -12,4 +12,4 @@ reddit_client_secret: !vault |
 36636464353761383464343634323035666163353561383231623337343732326263353535656165
 3738633565396265320a343330623938356631376664326562353437333263386538356438653336
 64326363666638306337386266653331633938316639383034376464306238613839
-reddit_callback_url: "https://rss.fudiggity.nl/accounts/settings/integrations/reddit/callback/"
+reddit_callback_url: 'https://rss.fudiggity.nl/accounts/settings/integrations/reddit/callback/'


@ -14,4 +14,4 @@ twitter_client_secret: !vault |
 32613037316134643965353138643236636632623865636632363964666161303330336136626264
 63366438343633653566313231633739343036663736333037353465353439346135663733363137
 386165313662356630643164396563316562
-twitter_redirect_url: "https://rss.fudiggity.nl/accounts/settings/integrations/twitter/callback/"
+twitter_redirect_url: 'https://rss.fudiggity.nl/accounts/settings/integrations/twitter/callback/'