This commit is contained in:
Elizabeth Hunt 2025-03-14 01:10:44 -07:00
parent aa5efc2c26
commit 3f5f2076da
Signed by: simponic
GPG Key ID: 2909B9A7FF6213EE
239 changed files with 1113 additions and 6288 deletions

BIN
.DS_Store vendored

Binary file not shown.

View File

@ -1,13 +0,0 @@
HEADSCALE_PREAUTH_KEY=
HEADSCALE_OIDC_SECRET=
LLDAP_JWT_SECRET=
LLDAP_USER_PASS=
PIHOLE_WEBPWD=
STEP_CA_ROOT_PASSWORD=
STEP_CA_INTERMEDIATE_PASSWORD=
OPENVPN_USER=
VAULTWARDEN_ADMIN_TOKEN=
INFO_FROM_PASSWORD=

84
README.md Normal file
View File

@ -0,0 +1,84 @@
# liz.coffee infra
A collection of playbooks to deploy the liz.coffee infra
## Prerequisites
- `ansible`
- `yamllint`
- `ansible-lint`
- an ssh key accepted on each host in the `inventory`
## Setup
### Vault
Secrets are managed via `ansible-vault`. Initialize or update your vault
with new secrets via our custom `./ansible-vault-init.sh` script.
Additionally if you want to only update a single secret, use
`./ansible-vault-init.sh <secret_name>`.
If you don't want to be prompted to enter your password every time you
deploy something, put your password as plain text into `secrets.pwd` as
a single line in the root src directory:
```bash
echo "<your_password>" > secrets.pwd
```
Then you can add `--vault-password-file secrets.pwd` each time you run a
deployment (or you know, use `pass` or something if you're paranoid).
### Pre-commit hooks
1. clone the repo
```bash
git clone git@git.liz.coffee:liz.coffee/infra
cd infra
```
2. add a pre-commit hook
```bash
cd .git/hooks
touch pre-commit
```
3. insert into `pre-commit` the following contents:
```bash
#!/bin/sh
set -e
# lint yaml files
echo "running yamllint..."
yamllint --strict .
# follow ansible best-practices
echo "running ansible-lint"
ansible-lint
```
4. make it executable
```bash
chmod +x pre-commit
```
## Running
`ansible-playbook -e @secrets.enc deploy.yml` will run each respectively added playbook in `deploy.yml`
using the vault initialized in the previous steps.
Though in development, one should be testing individual playbooks, and `deploy.yml`
should be left for an idea of general order of things, or for a
full deployment after testing.
NOTE: It is highly advised to run `ansible-playbook` in an `ssh-agent` session to avoid retyping your password over and over. Something along the lines of:
```bash
ssh-agent $(echo $SHELL)
ssh-add ~/.ssh/<private-key>
```

66
ansible-vault-init.sh Executable file
View File

@ -0,0 +1,66 @@
#!/bin/bash
# usage: ./ansible-vault-init.sh <? secret-name-to-update>
#
# Interactively (re)builds the ansible-vault encrypted secrets file
# (secrets.enc) from the list of secret names in secrets.txt.
# With a single argument, only that one secret is (re)prompted.
set -euo pipefail

# password input: prompt twice until both entries match
while true; do
  read -r -s -p "Password: " VAULT_PASSWORD
  echo
  read -r -s -p "Confirm password: " confirmationpwd
  echo
  [ "$VAULT_PASSWORD" = "$confirmationpwd" ] && break
  echo "Please try again"
done
###
SECRETS_KEYS_FILE="secrets.txt"   # one secret name per line

# temporary (plain-text) secret store
TEMP_FILE="temp_secrets.yml"
VAULT_FILE="secrets.enc"

if [ "$#" -eq 1 ]; then
  SINGLE_SECRET_MODE=true
  SECRET_TO_UPDATE=$1
else
  SINGLE_SECRET_MODE=false
fi

if [ -f "$VAULT_FILE" ]; then
  # decrypt the existing vault so already-present secrets are preserved
  ansible-vault decrypt "$VAULT_FILE" --output="$TEMP_FILE" \
    --vault-password-file <(printf '%s' "$VAULT_PASSWORD")
else
  # create the temporary file
  : > "$TEMP_FILE"
fi

# read secret names, one per line (mapfile tolerates a missing final newline
# and, unlike `read -d ''`, does not return non-zero under `set -e`)
mapfile -t secrets < "$SECRETS_KEYS_FILE"

echo "Gathering secrets..."
for secret_name in "${secrets[@]}"; do
  if [ "$SINGLE_SECRET_MODE" = true ] && [ "$secret_name" != "$SECRET_TO_UPDATE" ]; then
    continue
  fi

  if grep -q "^$secret_name:" "$TEMP_FILE"; then
    if [ "$SINGLE_SECRET_MODE" = true ]; then
      # Remove the old value of the secret so the new one replaces it
      sed -i "/^$secret_name:/d" "$TEMP_FILE"
    else
      echo "Secret $secret_name already exists, skipping."
      continue
    fi
  fi

  printf 'Enter value for %s: ' "$secret_name"
  read -r secret_value
  echo "$secret_name: $secret_value" >> "$TEMP_FILE"
done

echo "Re-encrypting secrets..."
ansible-vault encrypt "$TEMP_FILE" --output="$VAULT_FILE" \
  --vault-password-file <(printf '%s' "$VAULT_PASSWORD")

# remove the temp secrets file securely
shred -u "$TEMP_FILE"

echo "Secrets have been encrypted into secrets.enc"

View File

@ -1,3 +1,4 @@
[defaults]
inventory = inventory
host_key_checking = False

View File

@ -1,185 +0,0 @@
#!/bin/bash
set -e

# Prompt for a value, showing a default in brackets.
# Arguments: $1 - prompt message, $2 - default value
# Outputs:   the entered value (or the default when input is empty) on stdout
prompt_with_default() {
  local prompt_message="$1"
  local default_value="$2"
  local input   # local: the original leaked `input` into the global scope
  # -r: don't mangle backslashes in user input
  read -r -p "$prompt_message [$default_value]: " input
  echo "${input:-$default_value}"
}
# Gather all deployment parameters interactively, each with a sensible default.
DNS_ENDPOINT=$(prompt_with_default "Enter DNS endpoint" "https://hatecomputers.club/dns")
BIND_FILE=$(prompt_with_default "Enter bind file path" "roles/nameservers/templates/db.simponic.xyz.j2")
SERVICE_TITLE=$(prompt_with_default "Enter service title" "whois simponic.")
SERVICE=$(prompt_with_default "Enter service name" "whois")
SERVICE_PORT=$(prompt_with_default "Enter service port" "8466")
SERVICE_REPO=$(prompt_with_default "Enter service repository URL" "git.simponic.xyz/simponic/$SERVICE")
SERVICE_ORIGIN=$(prompt_with_default "Enter service origin URL" "git@git.simponic.xyz:simponic/$SERVICE")
INTERNAL=$(prompt_with_default "Is the service internal? (yes/no)" "no")
SERVICE_HOST=$(prompt_with_default "Enter service host" "ryo")
PACKAGE_PATH=$(prompt_with_default "Enter package path" "$HOME/git/simponic/$SERVICE")
# default comes from the Wayland clipboard — requires wl-clipboard installed
HATECOMPUTERS_API_KEY=$(prompt_with_default "Enter hatecomputers API key (paste from clipboard)" "$(wl-paste)")
# Copy the ./template skeleton into $PACKAGE_PATH and substitute every
# {{ placeholder }} token with its configured value.
# Globals read: PACKAGE_PATH, SERVICE, SERVICE_HOST, SERVICE_REPO,
#               SERVICE_PORT, SERVICE_TITLE
function render_template() {
  cp -r template "$PACKAGE_PATH"
  # xargs -r: if no file contains the placeholder, skip sed entirely
  # (without it, GNU xargs would run `sed -i` with no files and hang on stdin)
  grep -rlZ "{{ service }}" "$PACKAGE_PATH" | xargs -0 -r sed -i "s/{{ service }}/$SERVICE/g"
  grep -rlZ "{{ service_host }}" "$PACKAGE_PATH" | xargs -0 -r sed -i "s/{{ service_host }}/$SERVICE_HOST/g"
  # the repo URL contains slashes — escape them for the sed replacement text
  grep -rlZ "{{ service_repo }}" "$PACKAGE_PATH" | xargs -0 -r sed -i "s/{{ service_repo }}/$(echo "$SERVICE_REPO" | sed 's/\//\\\//g')/g"
  grep -rlZ "{{ service_port }}" "$PACKAGE_PATH" | xargs -0 -r sed -i "s/{{ service_port }}/$SERVICE_PORT/g"
  grep -rlZ "{{ service_title }}" "$PACKAGE_PATH" | xargs -0 -r sed -i "s/{{ service_title }}/$SERVICE_TITLE/g"
}
# Format, build and test the new Go service, then create its initial git
# commit and push to the configured origin. Pauses twice for the operator
# to create the remote repository and sync it with Drone CI.
# Globals read: PACKAGE_PATH, SERVICE, SERVICE_REPO, SERVICE_ORIGIN
function test_and_commit_code() {
  cd $PACKAGE_PATH
  go fmt ./...
  go get
  go mod tidy
  go build
  go test -v ./...
  echo "everything looks good, can you make a repo at https://$SERVICE_REPO (press enter when done)"
  read
  echo "cool. now, please sync it with drone (https://drone.internal.simponic.xyz/simponic/$SERVICE). (press enter when done)"
  read
  git init
  git add .
  git commit -m "initial commit by simponic-infra"
  git checkout -B main
  git remote add origin $SERVICE_ORIGIN
  git push -u origin main
  # return to the previous directory
  cd -
}
# Register DNS for the new service: internal services are created through
# the hatecomputers DNS API; public ones get a CNAME appended to the bind
# zone file for later deployment by the nameservers role.
# Globals read: INTERNAL, SERVICE, SERVICE_HOST, HATECOMPUTERS_API_KEY,
#               DNS_ENDPOINT, BIND_FILE
function add_dns_records() {
  if [[ "$INTERNAL" = "yes" ]]; then
    name="$SERVICE.internal.simponic.xyz."
    content="$SERVICE_HOST.internal.simponic.xyz."
    # NOTE(review): all fields are packed into a single -F value joined with
    # '&'; multipart form fields are normally one -F per field (or -d for
    # urlencoded) — confirm the DNS endpoint actually parses this form.
    curl -H "Authorization: Bearer $HATECOMPUTERS_API_KEY" \
    -F "type=CNAME&name=$name&content=$content&ttl=43200&internal=on" \
    $DNS_ENDPOINT
  else
    name="$SERVICE.simponic.xyz."
    content="$SERVICE_HOST.simponic.xyz."
    # insert the CNAME record right after the ";; CNAME Records" marker line
    sed -i "s|;; CNAME Records|;; CNAME Records\n$name\t43200\tIN\tCNAME\t$content|" $BIND_FILE
  fi
}
# Generate nginx vhost configs for a public service: an HTTPS vhost that
# reverse-proxies to the service's local port, and an HTTP vhost serving
# the ACME challenge plus a permanent HTTPS redirect.
#
# NOTE(review): when INTERNAL=yes this function only reassigns variables and
# writes nothing (the original even had a typo'd "ednpoint" assignment
# there, fixed below) — confirm whether internal services were meant to get
# configs generated under roles/private/files as well.
function add_nginx_config() {
  endpoint="$SERVICE.simponic.xyz"
  destination="roles/webservers/files/$SERVICE_HOST"
  if [[ "$INTERNAL" = "yes" ]]; then
    endpoint="$SERVICE.internal.simponic.xyz"   # fixed typo: was "ednpoint"
    destination="roles/private/files/$SERVICE_HOST"
  else
    mkdir -p "$destination"
    # HTTPS vhost proxying to the local service port
    echo "server {
  listen 443 ssl;
  server_name $endpoint;

  ssl_certificate /etc/letsencrypt/live/$endpoint/fullchain.pem;
  ssl_certificate_key /etc/letsencrypt/live/$endpoint/privkey.pem;
  ssl_trusted_certificate /etc/letsencrypt/live/$endpoint/fullchain.pem;
  ssl_session_cache shared:SSL:50m;
  ssl_session_timeout 5m;
  ssl_stapling on;
  ssl_stapling_verify on;
  ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
  ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
  ssl_dhparam /etc/nginx/dhparams.pem;
  ssl_prefer_server_ciphers on;

  location / {
    proxy_pass http://127.0.0.1:$SERVICE_PORT;
    proxy_http_version 1.1;
    proxy_set_header Upgrade \$http_upgrade;
    proxy_set_header Connection \"upgrade\";
    proxy_set_header Host \$server_name;
    proxy_buffering off;
    proxy_set_header X-Real-IP \$remote_addr;
    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto \$http_x_forwarded_proto;
    add_header Strict-Transport-Security \"max-age=15552000; includeSubDomains\" always;
  }
}" > "$destination/https.$endpoint.conf"
    # HTTP vhost: serve ACME challenges, redirect everything else to HTTPS
    echo "server {
  listen 80;
  server_name $endpoint;

  location /.well-known/acme-challenge {
    root /var/www/letsencrypt;
    try_files \$uri \$uri/ =404;
  }

  location / {
    rewrite ^ https://$endpoint\$request_uri? permanent;
  }
}" > "$destination/http.$endpoint.conf"
  fi
}
# Wire the new service into ansible: append an inventory group, scaffold a
# role with the service's docker-compose as a template, generate the role
# tasks, and emit a standalone deploy playbook.
# Globals read: SERVICE, SERVICE_HOST, PACKAGE_PATH
function create_role() {
  printf "\n[$SERVICE]\n$SERVICE_HOST ansible_user=root ansible_connection=ssh" >> inventory
  mkdir -p roles/$SERVICE/tasks
  mkdir -p roles/$SERVICE/templates
  # the service repo's compose file becomes the role's jinja template
  cp $PACKAGE_PATH/docker-compose.yml roles/$SERVICE/templates/docker-compose.yml.j2
  # role tasks: create compose dirs/env, render compose file, restart unit
  echo "---
- name: ensure $SERVICE docker/compose exist
  file:
    path: /etc/docker/compose/$SERVICE
    state: directory
    owner: root
    group: root
    mode: 0700

- name: ensure $SERVICE db exist
  file:
    path: /etc/docker/compose/$SERVICE/db
    state: directory
    owner: root
    group: root
    mode: 0777

- name: ensure $SERVICE env exist
  file:
    path: /etc/docker/compose/$SERVICE/.env
    state: file
    owner: root
    group: root
    mode: 0700

- name: build $SERVICE docker-compose.yml.j2
  template:
    src: ../templates/docker-compose.yml.j2
    dest: /etc/docker/compose/$SERVICE/docker-compose.yml
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: daemon-reload and enable $SERVICE
  ansible.builtin.systemd_service:
    state: restarted
    enabled: true
    name: docker-compose@$SERVICE" > roles/$SERVICE/tasks/main.yml
  # standalone playbook targeting the new inventory group
  echo "- name: deploy $SERVICE
  hosts: $SERVICE
  roles:
    - $SERVICE" > deploy-$SERVICE.yml
}
# Main flow: scaffold the service from the template, build/test/push it,
# then wire up DNS, nginx, and the ansible role.
render_template
test_and_commit_code
add_dns_records
add_nginx_config
create_role

View File

@ -1,4 +0,0 @@
- name: authelia setup
hosts: authelia
roles:
- authelia

View File

@ -1,4 +0,0 @@
- name: backup-notifications setup
hosts: backup-notifications
roles:
- backup-notifications

View File

@ -1,4 +0,0 @@
- name: borg setup
hosts: borg
roles:
- borg

View File

@ -1,31 +0,0 @@
- name: add acme CA
hosts: ca
become: yes
roles:
- role: maxhoesel.smallstep.step_ca
tasks:
- name: add an acme provisioner to the ca
maxhoesel.smallstep.step_ca_provisioner:
name: ACME
type: ACME
become_user: step-ca
- name: restart step-ca
ansible.builtin.systemd_service:
name: step-ca
state: restarted
enabled: true
- name: allow step-ca port traffic on vpn
ufw:
rule: allow
from: 100.64.0.0/10
port: "{{ step_ca_port }}"
- name: restart ufw
ansible.builtin.systemd_service:
name: ufw
state: restarted
enabled: true
- name: configure trust to internal ca on all hosts
hosts: all
roles:
- ca

View File

@ -1,4 +0,0 @@
- name: basic host setup
hosts: all
roles:
- common

View File

@ -1,4 +0,0 @@
- name: drone setup
hosts: drone
roles:
- drone

View File

@ -1,4 +0,0 @@
- name: gitea setup
hosts: gitea
roles:
- gitea

View File

@ -1,4 +0,0 @@
- name: hatecomputers setup
hosts: hatecomputers
roles:
- hatecomputers

View File

@ -1,4 +0,0 @@
- name: lldap setup
hosts: lldap
roles:
- lldap

View File

@ -1,8 +0,0 @@
- name: mail setup
hosts: mail
roles:
- mail
- name: roundcube setup
hosts: roundcube
roles:
- roundcube

View File

@ -1,5 +0,0 @@
- name: basic host setup
hosts: nameservers
roles:
- dnscommon
- nameservers

View File

@ -1,4 +0,0 @@
- name: ntfy setup
hosts: ntfy
roles:
- ntfy

View File

@ -1,4 +0,0 @@
- name: owncloud setup
hosts: owncloud
roles:
- owncloud

View File

@ -1,4 +0,0 @@
- name: deploy phoneassistant
hosts: phoneassistant
roles:
- phoneassistant

View File

@ -1,4 +0,0 @@
- name: deploy phoneof
hosts: phoneof
roles:
- phoneof

View File

@ -1,4 +0,0 @@
- name: pihole setup
hosts: pihole
roles:
- pihole

View File

@ -1,4 +0,0 @@
- name: private setup
hosts: private
roles:
- private

View File

@ -1,4 +0,0 @@
- name: rainrainrain setup
hosts: rainrainrain
roles:
- rainrainrain

View File

@ -1,4 +0,0 @@
- name: roundcube setup
hosts: roundcube
roles:
- roundcube

View File

@ -1,4 +0,0 @@
- name: scurvy setup
hosts: scurvy
roles:
- scurvy

View File

@ -1,4 +0,0 @@
- name: deploy something
hosts: something
roles:
- something

View File

@ -1,4 +0,0 @@
- name: static setup
hosts: static
roles:
- static

View File

@ -1,4 +0,0 @@
- name: uptime setup
hosts: uptime
roles:
- uptime

View File

@ -1,4 +0,0 @@
- name: vaultwarden setup
hosts: vaultwarden
roles:
- vaultwarden

View File

@ -1,23 +0,0 @@
- name: prod headscale tags
hosts: prod
tasks:
- name: add prod tags to prod servers
include_role:
name: artis3n.tailscale
vars:
tailscale_args: "--login-server='https://headscale.simponic.xyz'"
tailscale_authkey: "{{ lookup('env', 'HEADSCALE_PREAUTH_KEY') }}"
tailscale_tags:
- "prod"
- name: private headscale tags
hosts: private
tasks:
- name: add private tags to private servers
include_role:
name: artis3n.tailscale
vars:
tailscale_args: "--login-server='https://headscale.simponic.xyz'"
tailscale_authkey: "{{ lookup('env', 'HEADSCALE_PREAUTH_KEY') }}"
tailscale_tags:
- "private"

View File

@ -1,4 +0,0 @@
- name: headscale setup
hosts: vpn
roles:
- vpn

View File

@ -1,4 +0,0 @@
- name: webserver setup
hosts: webservers
roles:
- webservers

View File

@ -1,4 +0,0 @@
- name: deploy whois
hosts: whois
roles:
- whois

View File

@ -1,4 +0,0 @@
- name: configure trust to internal zigbee on all hosts
hosts: zigbee
roles:
- zigbee

14
deploy.yml Normal file
View File

@ -0,0 +1,14 @@
---
- name: Common
ansible.builtin.import_playbook: playbooks/deploy-common.yml
- name: Docker
ansible.builtin.import_playbook: playbooks/deploy-docker.yml
- name: NGINX Proxy
ansible.builtin.import_playbook: playbooks/deploy-nginx-proxy.yml
- name: outbound
ansible.builtin.import_playbook: playbooks/deploy-outbound.yml

View File

@ -1,10 +1,9 @@
---
johan_ip: '100.64.0.5'
nijika_ip: '100.64.0.2'
nameserver_ip: '10.155.0.1'
ansible_user: serve
step_bootstrap_fingerprint: '2de0c420e3b6f9f8e47f325de908b2b2d395d3bc7e49ed9b672ce9be89bea1bf'
step_bootstrap_ca_url: 'ca.internal.simponic.xyz'
step_acme_cert_contact: 'elizabeth@simponic.xyz'
step_ca_port: 5239
rfc1918_cgnat_networks:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- 100.64.0.0/10

View File

@ -1,23 +0,0 @@
borg_password: "{{ lookup('env', 'BORG_ENCRYPTION_PASSWORD') }}"
borg_repo: "{{ lookup('env', 'BORG_REPO') }}"
borg_secret_key: "{{ lookup('env', 'BORG_SECRET_KEY') }}"
borg_my_user: "root"
borg_my_group: "root"
borg_ssh_key: "/root/borg_ssh_key"
backup_topic: "{{ lookup('env', 'BORG_BACKUP_TOPIC') }}"
base_files:
- /home
- /root
- /var
- /etc
- /boot
- /opt
extra_files:
europa:
- /mnt/ssd-01/owncloud
- /mnt/ssd-01/borg/sync.sh
- /mnt/ssd-01/borg/.config
- /mnt/ssd-01/borg/.ssh

View File

@ -1,6 +0,0 @@
---
step_ca_root_password: "{{ lookup('env', 'STEP_CA_ROOT_PASSWORD') }}"
step_ca_intermediate_password: "{{ lookup('env', 'STEP_CA_INTERMEDIATE_PASSWORD') }}"
step_ca_dns: "{{ nameserver_ip }}, {{ step_bootstrap_ca_url }}"
step_ca_name: Simponic Internal CA
step_ca_address: ":{{ step_ca_port }}"

View File

@ -1,4 +0,0 @@
---
drone_gitea_client_id: "{{ lookup('env', 'DRONE_GITEA_CLIENT_ID') }}"
drone_gitea_client_secret: "{{ lookup('env', 'DRONE_GITEA_CLIENT_SECRET') }}"
drone_rpc_secret: "{{ lookup('env', 'DRONE_RPC_SECRET') }}"

View File

@ -1,3 +0,0 @@
---
lldap_jwt_secret: "{{ lookup('env', 'LLDAP_JWT_SECRET') }}"
lldap_user_pass: "{{ lookup('env', 'LLDAP_USER_PASS') }}"

View File

@ -1,5 +0,0 @@
---
domain: mail.simponic.xyz
certbot_email: elizabeth.hunt@simponic.xyz
postmaster_email: postmaster@simponic.xyz
lldap_admin_pass: "{{ lookup('env', 'LLDAP_USER_PASS') }}"

View File

@ -1,8 +0,0 @@
dns_zones:
- zone: simponic.xyz
- zone: rainrainra.in
- zone: rileyandlizzy.wedding
dns_primary_hostname: ryo
dns_replica_hostname: nijika
dns_primary_ip: 107.173.19.33
dns_replica_ip: 107.172.103.253

View File

@ -0,0 +1,10 @@
---
headscale_host: 'vpn.liz.coffee'
headscale_url: 'https://{{ headscale_host }}'
headscale_base_domain: 'vpn.liz.coffee'
headscale_port: '8080'
headscale_listen_addr: '127.0.0.1:{{ headscale_port }}'
headscale_dns_for_connected_clients_1: '1.1.1.1'
headscale_dns_for_connected_clients_2: '1.0.0.1'

10
group_vars/outbound.yml Normal file
View File

@ -0,0 +1,10 @@
---
headscale_host: 'vpn.liz.coffee'
headscale_url: 'https://{{ headscale_host }}'
headscale_base_domain: 'vpn.liz.coffee'
headscale_port: '8080'
headscale_listen_addr: '127.0.0.1:{{ headscale_port }}'
headscale_dns_for_connected_clients_1: '1.1.1.1'
headscale_dns_for_connected_clients_2: '1.0.0.1'

View File

@ -1,10 +0,0 @@
---
owncloud_admin_password: "{{ lookup('env', 'OWNCLOUD_ADMIN_PASSWORD') }}"
owncloud_domain: "owncloud.internal.simponic.xyz"
owncloud_version: "10.14.0"
owncloud_trusted_domains: "owncloud.internal.simponic.xyz,localhost,127.0.0.1"
owncloud_mount: "/mnt/ssd-01/owncloud"
owncloud_oidc_secret: "{{ lookup('env', 'OWNCLOUD_OIDC_SECRET') }}"
owncloud_mail_password: "{{ lookup('env', 'INFO_FROM_PASSWORD') }}"
owncloud_secret: "{{ lookup('env', 'OWNCLOUD_SECRET') }}"
owncloud_pwd_salt: "{{ lookup('env', 'OWNCLOUD_PWD_SALT') }}"

View File

@ -1,4 +0,0 @@
from_phone_number: "{{ lookup('env', 'FROM_PHONE_NUMBER')}}"
to_phone_number: "{{ lookup('env', 'TO_PHONE_NUMBER')}}"
httpsms_api_token: "{{ lookup('env', 'HTTPSMS_API_TOKEN')}}"
httpsms_signing_key: "{{ lookup('env', 'HTTPSMS_SIGNING_KEY')}}"

View File

@ -1,2 +0,0 @@
---
pihole_webpwd: "{{ lookup('env', 'PIHOLE_WEBPWD') }}"

View File

@ -1,2 +0,0 @@
---
openvpn_user: "{{ lookup('env', 'OPENVPN_USER') }}"

View File

@ -1,3 +0,0 @@
---
vaultwarden_admin_token: "{{ lookup('env', 'VAULTWARDEN_ADMIN_TOKEN') }}"
email_password: "{{ lookup('env', 'INFO_FROM_PASSWORD') }}"

View File

@ -1,7 +0,0 @@
---
headscale_oidc_secret: "{{ lookup('env', 'HEADSCALE_OIDC_SECRET') }}"
headscale_allowed_users:
- "elizabeth@simponic.xyz"
- "riley@simponic.xyz"
- "rain@simponic.xyz"
- "lucina@simponic.xyz"

View File

@ -1,3 +0,0 @@
---
letsencrypt_email: 'elizabeth@simponic.xyz'
hatecomputers_api_key: "{{ lookup('env', 'HATECOMPUTERS_API_KEY') }}"

112
inventory
View File

@ -1,103 +1,15 @@
[borg]
nijika ansible_user=root ansible_connection=ssh
ryo ansible_user=root ansible_connection=ssh
levi ansible_user=root ansible_connection=ssh
mail.simponic.xyz ansible_user=root ansible_connection=ssh
europa ansible_user=root ansible_connection=ssh
johan ansible_user=root ansible_connection=ssh
raspberrypi ansible_user=root ansible_connection=ssh
[docker]
swarm-one.localdomain ansible_user=serve ansible_connection=ssh ansible_become_password='{{ swarm_become_password }}'
swarm-two.localdomain ansible_user=serve ansible_connection=ssh ansible_become_password='{{ swarm_become_password }}'
swarm-three.localdomain ansible_user=serve ansible_connection=ssh ansible_become_password='{{ swarm_become_password }}'
[prod]
nijika ansible_user=root ansible_connection=ssh
ryo ansible_user=root ansible_connection=ssh
levi ansible_user=root ansible_connection=ssh
mail.simponic.xyz ansible_user=root ansible_connection=ssh
outbound-one.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ outbound_one_become_password }}'
# outbound-two.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ vpn_become_password }}'
[private]
johan ansible_user=root ansible_connection=ssh
europa ansible_user=root ansible_connection=ssh
raspberrypi ansible_user=root ansible_connection=ssh
[nginx-proxy]
outbound-one.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ outbound_one_become_password }}'
# outbound-two.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ vpn_become_password }}'
[webservers]
levi ansible_user=root ansible_connection=ssh
nijika ansible_user=root ansible_connection=ssh
ryo ansible_user=root ansible_connection=ssh
[nameservers]
ryo ansible_user=root ansible_connection=ssh
nijika ansible_user=root ansible_connection=ssh
[dnsprimary]
ryo ansible_user=root ansible_connection=ssh
[dnsreplica]
nijika ansible_user=root ansible_connection=ssh
[vpn]
nijika ansible_user=root ansible_connection=ssh
[authelia]
nijika ansible_user=root ansible_connection=ssh
[dnsinternal]
johan ansible_user=root ansible_connection=ssh
[pihole]
johan ansible_user=root ansible_connection=ssh
[vaultwarden]
johan ansible_user=root ansible_connection=ssh
[lldap]
johan ansible_user=root ansible_connection=ssh
[ca]
johan ansible_user=root ansible_connection=ssh
[mail]
mail.simponic.xyz ansible_user=root ansible_connection=ssh
[roundcube]
europa ansible_user=root ansible_connection=ssh
[scurvy]
europa ansible_user=root ansible_connection=ssh
[gitea]
nijika ansible_user=root ansible_connection=ssh
[static]
levi ansible_user=root ansible_connection=ssh
[owncloud]
europa ansible_user=root ansible_connection=ssh
[drone]
europa ansible_user=root ansible_connection=ssh
[hatecomputers]
levi ansible_user=root ansible_connection=ssh
[ntfy]
johan ansible_user=root ansible_connection=ssh
[backup-notifications]
johan ansible_user=root ansible_connection=ssh
[uptime]
raspberrypi ansible_user=root ansible_connection=ssh
[phoneof]
ryo ansible_user=root ansible_connection=ssh
[something]
ryo ansible_user=root ansible_connection=ssh
[whois]
ryo ansible_user=root ansible_connection=ssh
[phoneassistant]
johan ansible_user=root ansible_connection=ssh
[zigbee]
raspberrypi ansible_user=root ansible_connection=ssh
[outbound]
outbound-one.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ outbound_one_become_password }}'
# outbound-two.liz.coffee ansible_user=serve ansible_connection=ssh ansible_become_password='{{ vpn_become_password }}'

1
password.txt Normal file
View File

@ -0,0 +1 @@
lKLDS8eet229nvvBppGqzS

View File

@ -0,0 +1,7 @@
---
- name: Common setup
hosts: all
become: true
roles:
- common

View File

@ -0,0 +1,7 @@
---
- name: Docker setup
hosts: docker
become: true
roles:
- docker

View File

@ -0,0 +1,7 @@
---
- name: nginx-proxy setup
hosts: nginx-proxy
become: true
roles:
- nginx-proxy

View File

@ -0,0 +1,7 @@
---
- name: outbound setup
hosts: outbound
become: true
roles:
- outbound

View File

@ -0,0 +1,22 @@
---
# Handlers for the common role: time sync, sshd, and ufw service management.
- name: Enable systemd-timesyncd
  ansible.builtin.service:
    name: systemd-timesyncd
    state: restarted
    enabled: true

- name: Restart sshd
  ansible.builtin.service:
    name: sshd
    state: restarted
    enabled: true

- name: Enable ufw
  ansible.builtin.service:
    # fix: the service module requires `name`; it was missing here, which
    # fails at runtime ("name is required")
    name: ufw
    enabled: true

- name: Reload ufw
  ansible.builtin.service:
    name: ufw
    state: restarted

View File

@ -0,0 +1,66 @@
---
### Base packages
- name: Apt upgrade, update
  ansible.builtin.apt:
    update_cache: true
    upgrade: "dist"

- name: Install dependencies
  ansible.builtin.apt:
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg-agent
      - software-properties-common
      - vim
      - git
      - rsync
    state: latest
    update_cache: true

### Time
- name: Timesyncd
  ansible.builtin.apt:
    name:
      - systemd-timesyncd
  notify:
    - Enable systemd-timesyncd

### SSH
- name: Copy sshd_config
  ansible.builtin.copy:
    src: files/sshd_config
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: u=rw,g=r,o=r
  notify:
    - Restart sshd

- name: Copy authorized_keys
  ansible.builtin.copy:
    src: files/authorized_keys
    dest: /home/{{ ansible_user }}/.ssh/authorized_keys

### UFW
- name: Install ufw
  ansible.builtin.apt:
    name: ufw
    state: present

- name: Allow ssh from rfc1918 networks
  # fix: group_vars/all defines `rfc1918_cgnat_networks`; the original
  # looped over the undefined variable `rfc1918_networks`
  loop: "{{ rfc1918_cgnat_networks }}"
  community.general.ufw:
    rule: allow
    name: "OpenSSH"
    from: "{{ item }}"
    state: "enabled"
  notify:
    - Enable ufw
    - Reload ufw

View File

@ -4,14 +4,16 @@ Requires=docker.service
After=docker.service
[Service]
Type=simple
Restart=always
RestartSec=3
RemainAfterExit=true
WorkingDirectory=/etc/docker/compose/%i
ExecStartPre=/bin/bash -c "/usr/bin/docker compose pull || true"
ExecStart=/usr/bin/docker compose up --detach --remove-orphans
ExecStart=/usr/bin/docker compose up
ExecStop=/usr/bin/docker compose down
Restart=always
RestartSec=5
StartLimitInterval=500
StartLimitBurst=3
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,204 @@
#!/bin/sh
# docker-rollout — zero-downtime rolling deploy for a Docker Compose
# service: scale to 2x replicas, wait for the new containers to be healthy,
# then stop and remove the old ones. (Vendored third-party plugin.)
set -e

# Defaults
HEALTHCHECK_TIMEOUT=60        # seconds to wait for healthchecks to pass
NO_HEALTHCHECK_TIMEOUT=10     # grace period when no healthcheck is defined
WAIT_AFTER_HEALTHY_DELAY=0    # extra settle time after containers go healthy

# Print metadata for Docker CLI plugin
if [ "$1" = "docker-cli-plugin-metadata" ]; then
  cat <<EOF
{
  "SchemaVersion": "0.1.0",
  "Vendor": "Karol Musur",
  "Version": "v0.9",
  "ShortDescription": "Rollout new Compose service version"
}
EOF
  exit
fi

# Save docker arguments, i.e. arguments before "rollout"
while [ $# -gt 0 ]; do
  if [ "$1" = "rollout" ]; then
    shift
    break
  fi

  DOCKER_ARGS="$DOCKER_ARGS $1"
  shift
done

# Check if compose v2 is available
if docker compose >/dev/null 2>&1; then
  # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
  COMPOSE_COMMAND="docker $DOCKER_ARGS compose"
elif docker-compose >/dev/null 2>&1; then
  COMPOSE_COMMAND="docker-compose"
else
  echo "docker compose or docker-compose is required"
  exit 1
fi
# Print CLI usage/help text on stdout.
usage() {
  cat <<EOF
Usage: docker rollout [OPTIONS] SERVICE
Rollout new Compose service version.

Options:
  -h, --help              Print usage
  -f, --file FILE         Compose configuration files
  -t, --timeout N         Healthcheck timeout (default: $HEALTHCHECK_TIMEOUT seconds)
  -w, --wait N            When no healthcheck is defined, wait for N seconds
                          before stopping old container (default: $NO_HEALTHCHECK_TIMEOUT seconds)
  --wait-after-healthy N  When healthcheck is defined and succeeds, wait for additional N seconds
                          before stopping the old container (default: 0 seconds)
  --env-file FILE         Specify an alternate environment file
EOF
}
# Print usage and terminate with a non-zero (failure) status.
exit_with_usage() {
  usage
  exit 1
}
# Succeed (exit 0) when container $1 reports a "healthy" health status.
# "unhealthy" contains the substring "healthy", so lines carrying it are
# filtered out first before probing for the positive match.
healthcheck() {
  # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
  docker $DOCKER_ARGS inspect --format='{{json .State.Health.Status}}' "$1" \
    | grep -v "unhealthy" \
    | grep -q "healthy"
}
# Scale compose service $1 to $2 replicas without recreating the
# containers that are already running.
scale() {
  # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
  $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --scale "$1=$2" --no-recreate "$1"
}
# Perform the rollout for $SERVICE: double the replica count, wait for the
# new containers (healthcheck loop when one is defined, fixed delay
# otherwise), then stop and remove the old containers. Rolls back by
# removing the new containers if they never become healthy.
main() {
  # If the service isn't running yet, there is nothing to roll — just start it.
  # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
  if [ -z "$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE")" ]; then
    echo "==> Service '$SERVICE' is not running. Starting the service."
    $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --no-recreate "$SERVICE"
    exit 0
  fi

  # Old container IDs: '|'-joined for use as a grep -Ev pattern,
  # space-joined for passing to docker stop/rm.
  # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
  OLD_CONTAINER_IDS_STRING=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE" | tr '\n' '|' | sed 's/|$//')
  OLD_CONTAINER_IDS=$(echo "$OLD_CONTAINER_IDS_STRING" | tr '|' ' ')

  SCALE=$(echo "$OLD_CONTAINER_IDS" | wc -w | tr -d ' ')
  SCALE_TIMES_TWO=$((SCALE * 2))
  echo "==> Scaling '$SERVICE' to '$SCALE_TIMES_TWO' instances"
  scale "$SERVICE" $SCALE_TIMES_TWO

  # Create a variable that contains the IDs of the new containers, but not the old ones
  # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
  NEW_CONTAINER_IDS=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE" | grep -Ev "$OLD_CONTAINER_IDS_STRING" | tr '\n' ' ')

  # Check if first container has healthcheck
  # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
  if docker $DOCKER_ARGS inspect --format='{{json .State.Health}}' "$(echo $OLD_CONTAINER_IDS | cut -d\  -f 1)" | grep -q "Status"; then
    echo "==> Waiting for new containers to be healthy (timeout: $HEALTHCHECK_TIMEOUT seconds)"
    # Poll once per second until every new container is healthy or we time out.
    for _ in $(seq 1 "$HEALTHCHECK_TIMEOUT"); do
      SUCCESS=0

      for NEW_CONTAINER_ID in $NEW_CONTAINER_IDS; do
        if healthcheck "$NEW_CONTAINER_ID"; then
          SUCCESS=$((SUCCESS + 1))
        fi
      done

      if [ "$SUCCESS" = "$SCALE" ]; then
        break
      fi

      sleep 1
    done

    # Final tally after the wait loop — determines success or rollback.
    SUCCESS=0
    for NEW_CONTAINER_ID in $NEW_CONTAINER_IDS; do
      if healthcheck "$NEW_CONTAINER_ID"; then
        SUCCESS=$((SUCCESS + 1))
      fi
    done

    if [ "$SUCCESS" != "$SCALE" ]; then
      echo "==> New containers are not healthy. Rolling back." >&2

      docker $DOCKER_ARGS stop $NEW_CONTAINER_IDS
      docker $DOCKER_ARGS rm $NEW_CONTAINER_IDS

      exit 1
    fi

    if [ "$WAIT_AFTER_HEALTHY_DELAY" != "0" ]; then
      echo "==> Waiting for healthy containers to settle down ($WAIT_AFTER_HEALTHY_DELAY seconds)"
      sleep $WAIT_AFTER_HEALTHY_DELAY
    fi
  else
    echo "==> Waiting for new containers to be ready ($NO_HEALTHCHECK_TIMEOUT seconds)"
    sleep "$NO_HEALTHCHECK_TIMEOUT"
  fi

  echo "==> Stopping and removing old containers"
  # shellcheck disable=SC2086 # DOCKER_ARGS and OLD_CONTAINER_IDS must be unquoted to allow multiple arguments
  docker $DOCKER_ARGS stop $OLD_CONTAINER_IDS
  # shellcheck disable=SC2086 # DOCKER_ARGS and OLD_CONTAINER_IDS must be unquoted to allow multiple arguments
  docker $DOCKER_ARGS rm $OLD_CONTAINER_IDS
}
# Parse rollout options; the single positional argument is the SERVICE name.
while [ $# -gt 0 ]; do
  case "$1" in
    -h | --help)
      usage
      exit 0
      ;;
    -f | --file)
      COMPOSE_FILES="$COMPOSE_FILES -f $2"
      shift 2
      ;;
    --env-file)
      ENV_FILES="$ENV_FILES --env-file $2"
      shift 2
      ;;
    -t | --timeout)
      HEALTHCHECK_TIMEOUT="$2"
      shift 2
      ;;
    -w | --wait)
      NO_HEALTHCHECK_TIMEOUT="$2"
      shift 2
      ;;
    --wait-after-healthy)
      WAIT_AFTER_HEALTHY_DELAY="$2"
      shift 2
      ;;
    -*)
      echo "Unknown option: $1"
      exit_with_usage
      ;;
    *)
      # Positional argument: the service name. A repeated identical name is
      # tolerated; a conflicting second name aborts with usage.
      if [ -n "$SERVICE" ]; then
        echo "SERVICE is already set to '$SERVICE'"

        if [ "$SERVICE" != "$1" ]; then
          exit_with_usage
        fi
      fi

      SERVICE="$1"
      shift
      ;;
  esac
done

# Require SERVICE argument
if [ -z "$SERVICE" ]; then
  echo "SERVICE is missing"
  exit_with_usage
fi

main

View File

@ -0,0 +1,8 @@
---
- name: Enable docker
ansible.builtin.service:
name: docker
state: restarted
enabled: true

View File

@ -0,0 +1,55 @@
---
- name: Install dependencies
ansible.builtin.apt:
name:
- apt-transport-https
- ca-certificates
- curl
- gnupg-agent
- software-properties-common
state: present
update_cache: true
- name: Docker GPG key
become: true
ansible.builtin.apt_key:
url: >
https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
state: present
- name: Repository docker
ansible.builtin.apt_repository:
repo: >
deb https://download.docker.com/linux/{{ ansible_distribution | lower }}
{{ ansible_distribution_release }} stable
state: present
- name: Install docker
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: present
update_cache: true
notify:
- Enable docker
- name: Copy docker rollout script
ansible.builtin.copy:
src: docker-rollout
dest: /usr/local/bin/docker-rollout
mode: 0755
- name: Copy docker-compose@.service
ansible.builtin.copy:
src: docker-compose@.service
dest: /etc/systemd/system/docker-compose@.service
- name: Ensure /etc/docker/compose exist
ansible.builtin.file:
path: /etc/docker/compose
state: directory
mode: 0700

View File

@ -0,0 +1,21 @@
---
# Render the nginx-proxy compose project (templates dir mirrored into
# /etc/docker/compose/nginx-proxy) and start it via the docker-compose@
# systemd template unit.
- name: Build nginx-proxy compose dirs
  ansible.builtin.file:
    state: directory
    dest: '/etc/docker/compose/nginx-proxy/{{ item.path }}'
  with_filetree: '../templates'
  when: item.state == 'directory'

- name: Build nginx-proxy compose files
  ansible.builtin.template:
    src: '{{ item.src }}'
    dest: '/etc/docker/compose/nginx-proxy/{{ item.path }}'
  with_filetree: '../templates'
  when: item.state == 'file'

- name: Daemon-reload and enable nginx-proxy
  ansible.builtin.systemd_service:
    state: started
    enabled: true
    daemon_reload: true
    name: docker-compose@nginx-proxy

View File

@ -0,0 +1,32 @@
---
services:
nginx-proxy:
image: nginxproxy/nginx-proxy
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
- ./certs:/etc/nginx/certs
networks:
- proxy
nginx-acme-companion:
image: nginxproxy/acme-companion
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- acme:/etc/acme.sh
- ./certs:/etc/nginx/certs
environment:
- DEFAULT_EMAIL={{ certs_email }}
networks:
- proxy
volumes:
acme:
networks:
proxy:
driver: bridge

View File

@ -0,0 +1,28 @@
---
# Mirror ../templates into /etc/docker/compose/headscale, enable the systemd
# unit, then run docker-rollout for a zero-downtime redeploy when the stack
# was already running.
- name: Build headscale compose dirs
  ansible.builtin.file:
    state: directory
    dest: '/etc/docker/compose/headscale/{{ item.path }}'
    mode: '0755'  # explicit perms (ansible-lint risky-file-permissions)
  with_filetree: '../templates'
  when: item.state == 'directory'
- name: Build headscale compose files
  ansible.builtin.template:
    src: '{{ item.src }}'
    dest: '/etc/docker/compose/headscale/{{ item.path }}'
    mode: '0644'
  with_filetree: '../templates'
  when: item.state == 'file'
- name: Daemon-reload and enable headscale
  ansible.builtin.systemd_service:
    state: started
    enabled: true
    daemon_reload: true
    name: docker-compose@headscale
- name: Perform rollout in case daemon already started
  # No shell features needed -> use command (ansible-lint command-instead-of-shell).
  ansible.builtin.command:
    cmd: /usr/local/bin/docker-rollout rollout -f docker-compose.yml headscale
    chdir: /etc/docker/compose/headscale
  changed_when: true  # rollout always redeploys (ansible-lint no-changed-when)

View File

@ -0,0 +1,387 @@
---
server_url: '{{ headscale_url }}'
listen_addr: '{{ headscale_listen_addr }}'
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: 127.0.0.1:9090
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
prefixes:
v4: 100.64.0.0/10
v6: fd7a:115c:a1e0::/48
# Strategy used for allocation of IPs to nodes, available options:
# - sequential (default): assigns the next free IP from the previous given IP.
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
allocation: sequential
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
# it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
# If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an Exit-Node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
# NOTE(review): 1.2.3.4 / 2001:db8::1 are documentation placeholder addresses —
# set the server's real public IPs or comment these out; presumably only read
# when derp.server.enabled is true (it is false above) — confirm.
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap
# will be set up.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
database:
# Database type. Available options: sqlite, postgres
# Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
# All new development, testing and optimisations are done with SQLite in mind.
type: sqlite
# Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
debug: false
# GORM configuration settings.
gorm:
# Enable prepared statements.
prepare_stmt: true
# Enable parameterized queries.
parameterized_queries: true
# Skip logging "record not found" errors.
skip_err_record_not_found: true
# Threshold for slow queries in milliseconds.
slow_threshold: 1000
# SQLite config
sqlite:
path: /var/lib/headscale/db.sqlite
# Enable WAL mode for SQLite. This is recommended for production environments.
# https://www.sqlite.org/wal.html
write_ahead_log: true
# Maximum number of WAL file frames before the WAL file is automatically checkpointed.
# https://www.sqlite.org/c3ref/wal_autocheckpoint.html
# Set to 0 to disable automatic checkpointing.
wal_autocheckpoint: 1000
# # Postgres config
# Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
# See database.type for more information.
# postgres:
# # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# host: localhost
# port: 5432
# name: headscale
# user: foo
# pass: bar
# max_open_conns: 10
# max_idle_conns: 10
# conn_max_idle_time_secs: 3600
# # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# # in the 'ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# ssl: false
### TLS configuration
#
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See: docs/ref/tls.md for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
## Policy
# headscale supports Tailscale's ACL policies.
# Please have a look to their KB to better
# understand the concepts: https://tailscale.com/kb/1018/acls/
policy:
# The mode can be "file" or "database" that defines
# where the ACL policies are stored and read from.
mode: file
# If the mode is set to "file", the path to a
# HuJSON file containing ACL policies.
path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
# Please note that for the DNS configuration to have any effect,
# clients must have the `--accept-dns=true` option enabled. This is the
# default for the Tailscale client. This option is enabled by default
# in the Tailscale client.
#
# Setting _any_ of the configuration and `--accept-dns=true` on the
# clients will integrate with the DNS manager on the client or
# overwrite /etc/resolv.conf.
# https://tailscale.com/kb/1235/resolv-conf
#
# If you want to stop Headscale from managing the DNS configuration,
# all the fields under `dns` should be set to empty values.
dns:
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# This domain _must_ be different from the server_url domain.
# `base_domain` must be a FQDN, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.base_domain` (e.g., _myhost.example.com_).
base_domain: "{{ headscale_base_domain }}"
# List of DNS servers to expose to clients.
nameservers:
global:
- {{ headscale_dns_for_connected_clients_1 }}
- {{ headscale_dns_for_connected_clients_2 }}
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
# - https://dns.nextdns.io/abc123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# a map of domains and which DNS server to use for each.
split:
{}
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Set custom DNS search domains. With MagicDNS enabled,
# your tailnet base_domain is always the first search domain.
search_domains: []
# Extra DNS records
# so far only A and AAAA records are supported (on the tailscale side)
# See: docs/ref/dns.md
extra_records: []
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
#
# Alternatively, extra DNS records can be loaded from a JSON file.
# Headscale processes this file on each change.
# extra_records_path: /var/lib/headscale/extra-records.json
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only been enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # Optional: PKCE (Proof Key for Code Exchange) configuration
# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
# # by preventing authorization code interception attacks
# # See https://datatracker.ietf.org/doc/html/rfc7636
# pkce:
# # Enable or disable PKCE support (default: false)
# enabled: false
# # PKCE method to use:
# # - plain: Use plain code verifier
# # - S256: Use SHA256 hashed code verifier (default, recommended)
# method: S256
#
# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
# # by taking the username from the legacy user and matching it with the username
# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
# # to force them using the unique identifier from the OIDC and to give them a
# # proper display name and picture if available.
# # Note that this will only work if the username from the legacy user is the same
# # and there is a possibility for account takeover should a username have changed
# # with the provider.
# # When this feature is disabled, it will cause all new logins to be created as new users.
# # Note this option will be removed in the future and should be set to false
# # on all new installations, or when all users have logged in with OIDC once.
# map_legacy_users: false
# Logtail configuration
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscale's clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false

View File

@ -0,0 +1,39 @@
---
# headscale control server + web UI, published through nginx-proxy via the
# external 'proxy' network (VIRTUAL_* / LETSENCRYPT_* environment variables).
services:
  headscale:
    # stable-debug variant ships wget, which the healthcheck below relies on.
    image: headscale/headscale:stable-debug # until something better comes along with wget or i make my own dockerfile...
    pull_policy: always
    restart: unless-stopped
    command: serve
    volumes:
      - ./config:/etc/headscale
      - ./data:/var/lib/headscale
    networks:
      - proxy
    environment:
      - VIRTUAL_HOST={{ headscale_host }}
      - VIRTUAL_PORT={{ headscale_port }}
      - LETSENCRYPT_HOST={{ headscale_host }}
    healthcheck:
      test: ["CMD", "wget", "-qO", "-", "http://localhost:8080/health"]
      interval: 10s
      timeout: 5s
      retries: 3
  headscale-ui:
    image: ghcr.io/gurucomputing/headscale-ui:latest
    pull_policy: always
    restart: unless-stopped
    networks:
      - proxy
    environment:
      - VIRTUAL_HOST={{ headscale_host }}
      # NOTE(review): the UI container serves on its own internal port;
      # reusing headscale_port here looks suspicious — confirm nginx-proxy
      # routes the /web/ path to the correct upstream port.
      - VIRTUAL_PORT={{ headscale_port }}
      - LETSENCRYPT_HOST={{ headscale_host }}
      # Serve the UI under /web/ on the shared hostname, rewritten to /.
      - VIRTUAL_PATH=/web/
      - VIRTUAL_DEST=/
networks:
  proxy:
    external: true

View File

@ -1,2 +0,0 @@
users_database.yml
configuration.yml

View File

@ -1,30 +0,0 @@
---
- name: ensure authelia docker/compose exist
file:
path: /etc/docker/compose/authelia
state: directory
owner: root
group: root
mode: 0700
- name: copy authelia config
copy:
src: ../files/authelia
dest: /etc/docker/compose/authelia/
owner: root
group: root
mode: u=rw,g=r,o=r
- name: build authelia docker-compose.yml.j2
template:
src: ../templates/docker-compose.yml.j2
dest: /etc/docker/compose/authelia/docker-compose.yml
owner: root
group: root
mode: u=rw,g=r,o=r
- name: daemon-reload and enable authelia
ansible.builtin.systemd_service:
state: restarted
enabled: true
name: docker-compose@authelia

View File

@ -1,19 +0,0 @@
version: '3.3'
services:
authelia:
image: authelia/authelia
container_name: authelia
volumes:
- ./authelia:/config
ports:
- 127.0.0.1:9091:9091
restart: unless-stopped
dns:
- {{ nameserver_ip }}
redis:
image: redis:alpine
container_name: redis
volumes:
- ./redis:/data
restart: unless-stopped

View File

@ -1,22 +0,0 @@
---
- name: ensure backup-notifications docker/compose exist
file:
path: /etc/docker/compose/backup-notifications
state: directory
owner: root
group: root
mode: 0700
- name: build backup-notifications docker-compose.yml.j2
template:
src: ../templates/docker-compose.yml.j2
dest: /etc/docker/compose/backup-notifications/docker-compose.yml
owner: root
group: root
mode: u=rw,g=r,o=r
- name: daemon-reload and enable backup-notifications
ansible.builtin.systemd_service:
state: restarted
enabled: true
name: docker-compose@backup-notifications

View File

@ -1,15 +0,0 @@
version: "3"
services:
backup-notify:
restart: always
image: git.simponic.xyz/simponic/backup-notify:latest
healthcheck:
test: ["CMD", "wget", "--spider", "http://localhost:8080/health"]
interval: 5s
timeout: 10s
retries: 5
ports:
- "127.0.0.1:31152:8080"
volumes:
- ./db:/app/db

View File

@ -1,29 +0,0 @@
- name: copy key
template:
src: ../templates/borg_ssh_key.j2
dest: /root/borg_ssh_key
owner: root
group: root
mode: 0600
- name: push borg
import_role:
name: borgbase.ansible_role_borgbackup
vars:
borg_encryption_passphrase: "{{ borg_password }}"
borg_repository: "{{ borg_repo }}"
borg_user: "{{ borg_my_user }}"
borg_group: "{{ borg_my_group }}"
borgmatic_timer: cron
borg_ssh_command: "ssh -o StrictHostKeyChecking=no -i {{ borg_ssh_key }}"
borg_source_directories:
"{{ base_files + (extra_files[inventory_hostname] | default([])) }}"
borgmatic_relocated_repo_access_is_ok: True
borg_retention_policy:
keep_hourly: 3
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
borgmatic_hooks:
after_backup:
- "curl -d '{{ inventory_hostname }}' {{ backup_topic }}"

View File

@ -1 +0,0 @@
{{ borg_secret_key | b64decode }}

View File

@ -1,15 +0,0 @@
---
- name: get root CA certificate
command: >
curl -k -X GET -H "Content-Type:application/json" \
"https://{{ step_bootstrap_ca_url }}:{{ step_ca_port }}/root/{{ step_bootstrap_fingerprint }}"
register: root_ca_fp
- name: copy to os certificates
template:
src: "../templates/crt.j2"
dest: "/usr/local/share/ca-certificates/{{ step_bootstrap_ca_url }}.crt"
- name: update trusted certs
command: "update-ca-certificates"

View File

@ -1 +0,0 @@
{{ (root_ca_fp.stdout | from_json).ca }}

View File

@ -1,979 +0,0 @@
#
# WARNING: heavily refactored in 0.9.0 release. Please review and
# customize settings for your setup.
#
# Changes: in most of the cases you should not modify this
# file, but provide customizations in jail.local file,
# or separate .conf files under jail.d/ directory, e.g.:
#
# HOW TO ACTIVATE JAILS:
#
# YOU SHOULD NOT MODIFY THIS FILE.
#
# It will probably be overwritten or improved in a distribution update.
#
# Provide customizations in a jail.local file or a jail.d/customisation.local.
# For example to change the default bantime for all jails and to enable the
# ssh-iptables jail the following (uncommented) would appear in the .local file.
# See man 5 jail.conf for details.
#
# [DEFAULT]
# bantime = 1h
#
#
# See jail.conf(5) man page for more information
# Comments: use '#' for comment lines and ';' (following a space) for inline comments
[INCLUDES]
#before = paths-distro.conf
before = paths-debian.conf
# The DEFAULT allows a global definition of the options. They can be overridden
# in each jail afterwards.
[DEFAULT]
#
# MISCELLANEOUS OPTIONS
#
# "bantime.increment" allows to use database for searching of previously banned ip's to increase a
# default ban time using special formula, default it is banTime * 1, 2, 4, 8, 16, 32...
#bantime.increment = true
# "bantime.rndtime" is the max number of seconds using for mixing with random time
# to prevent "clever" botnets calculate exact time IP can be unbanned again:
#bantime.rndtime =
# "bantime.maxtime" is the max number of seconds using the ban time can reach (doesn't grow further)
#bantime.maxtime =
# "bantime.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
# default value of factor is 1 and with default value of formula, the ban time
# grows by 1, 2, 4, 8, 16 ...
#bantime.factor = 1
# "bantime.formula" used by default to calculate next value of ban time, default value below,
# the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32...
#bantime.formula = ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor
#
# more aggressive example of formula has the same values only for factor "2.0 / 2.885385" :
#bantime.formula = ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)
# "bantime.multipliers" used to calculate next value of ban time instead of formula, corresponding
# previously ban count and given "bantime.factor" (for multipliers default is 1);
# following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
# always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
#bantime.multipliers = 1 2 4 8 16 32 64
# following example can be used for small initial ban time (bantime=60) - it grows more aggressive at begin,
# for bantime=60 the multipliers are minutes and equal: 1 min, 5 min, 30 min, 1 hour, 5 hour, 12 hour, 1 day, 2 day
#bantime.multipliers = 1 5 30 60 300 720 1440 2880
# "bantime.overalljails" (if true) specifies the search of IP in the database will be executed
# cross over all jails, if false (default), only current jail of the ban IP will be searched
#bantime.overalljails = false
# --------------------
# "ignoreself" specifies whether the local resp. own IP addresses should be ignored
# (default is true). Fail2ban will not ban a host which matches such addresses.
#ignoreself = true
# "ignoreip" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban
# will not ban a host which matches an address in this list. Several addresses
# can be defined using space (and/or comma) separator.
#ignoreip = 127.0.0.1/8 ::1
# External command that will take an tagged arguments to ignore, e.g. <ip>,
# and return true if the IP is to be ignored. False otherwise.
#
# ignorecommand = /path/to/command <ip>
ignorecommand =
# "bantime" is the number of seconds that a host is banned.
bantime = 10m
# A host is banned if it has generated "maxretry" during the last "findtime"
# seconds.
findtime = 10m
# "maxretry" is the number of failures before a host get banned.
maxretry = 5
# "maxmatches" is the number of matches stored in ticket (resolvable via tag <matches> in actions).
maxmatches = %(maxretry)s
# "backend" specifies the backend used to get files modification.
# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
# This option can be overridden in each jail as well.
#
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
# If pyinotify is not installed, Fail2ban will use auto.
# gamin: requires Gamin (a file alteration monitor) to be installed.
# If Gamin is not installed, Fail2ban will use auto.
# polling: uses a polling algorithm which does not require external libraries.
# systemd: uses systemd python library to access the systemd journal.
# Specifying "logpath" is not valid for this backend.
# See "journalmatch" in the jails associated filter config
# auto: will try to use the following backends, in order:
# pyinotify, gamin, polling.
#
# Note: if systemd backend is chosen as the default but you enable a jail
# for which logs are present only in its own log files, specify some other
# backend for that jail (e.g. polling) and provide empty value for
# journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200
backend = systemd
# "usedns" specifies if jails should trust hostnames in logs,
# warn when DNS lookups are performed, or ignore all hostnames in logs
#
# yes: if a hostname is encountered, a DNS lookup will be performed.
# warn: if a hostname is encountered, a DNS lookup will be performed,
# but it will be logged as a warning.
# no: if a hostname is encountered, will not be used for banning,
# but it will be logged as info.
# raw: use raw value (no hostname), allow use it for no-host filters/actions (example user)
usedns = warn
# "logencoding" specifies the encoding of the log files handled by the jail
# This is used to decode the lines from the log file.
# Typical examples: "ascii", "utf-8"
#
# auto: will use the system locale setting
logencoding = auto
# "enabled" enables the jails.
# By default all jails are disabled, and it should stay this way.
# Enable only relevant to your setup jails in your .local or jail.d/*.conf
#
# true: jail will be enabled and log files will get monitored for changes
# false: jail is not enabled
enabled = false
# "mode" defines the mode of the filter (see corresponding filter implementation for more info).
mode = normal
# "filter" defines the filter to use by the jail.
# By default jails have names matching their filter name
#
filter = %(__name__)s[mode=%(mode)s]
#
# ACTIONS
#
# Some options used for actions
# Destination email address used solely for the interpolations in
# jail.{conf,local,d/*} configuration files.
destemail = root@localhost
# Sender email address used solely for some actions
sender = root@<fq-hostname>
# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the
# mailing. Change mta configuration parameter to mail if you want to
# revert to conventional 'mail'.
mta = sendmail
# Default protocol
protocol = tcp
# Specify chain where jumps would need to be added in ban-actions expecting parameter chain
chain = <known/chain>
# Ports to be banned
# Usually should be overridden in a particular jail
port = 0:65535
# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3
fail2ban_agent = Fail2Ban/%(fail2ban_version)s
#
# Action shortcuts. To be used to define action parameter
# Default banning action (e.g. iptables, iptables-new,
# iptables-multiport, shorewall, etc) It is used to define
# action_* variables. Can be overridden globally or per
# section within jail.local file
banaction = iptables-multiport
banaction_allports = iptables-allports
# The simplest action to take: ban only
action_ = %(banaction)s[port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
# ban & send an e-mail with whois report to the destemail.
action_mw = %(action_)s
%(mta)s-whois[sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"]
# ban & send an e-mail with whois report and relevant log lines
# to the destemail.
action_mwl = %(action_)s
%(mta)s-whois-lines[sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action
#
# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines
# to the destemail.
action_xarf = %(action_)s
xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath="%(logpath)s", port="%(port)s"]
# ban & send a notification to one or more of the 50+ services supported by Apprise.
# See https://github.com/caronc/apprise/wiki for details on what is supported.
#
# You may optionally over-ride the default configuration line (containing the Apprise URLs)
# by using 'apprise[config="/alternate/path/to/apprise.cfg"]' otherwise
# /etc/fail2ban/apprise.conf is sourced for your supported notification configuration.
# action = %(action_)s
# apprise
# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines
# to the destemail.
action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"]
%(mta)s-whois-lines[sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
# Report block via blocklist.de fail2ban reporting service API
#
# See the IMPORTANT note in action.d/blocklist_de.conf for when to use this action.
# Specify expected parameters in file action.d/blocklist_de.local or if the interpolation
# `action_blocklist_de` used for the action, set value of `blocklist_de_apikey`
# in your `jail.local` globally (section [DEFAULT]) or per specific jail section (resp. in
# corresponding jail.d/my-jail.local file).
#
action_blocklist_de = blocklist_de[email="%(sender)s", service="%(__name__)s", apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"]
# Report ban via abuseipdb.com.
#
# See action.d/abuseipdb.conf for usage example and details.
#
action_abuseipdb = abuseipdb
# Choose default action. To change, just override value of 'action' with the
# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local
# globally (section [DEFAULT]) or per specific section
action = %(action_)s
#
# JAILS
#
#
# SSH servers
#
[sshd]
# To use more aggressive sshd modes set filter parameter "mode" in jail.local:
# normal (default), ddos, extra or aggressive (combines all).
# See "tests/files/logs/sshd" or "filter.d/sshd.conf" for usage example and details.
mode = normal
enabled = true
port = ssh
logpath = %(sshd_log)s
backend = %(sshd_backend)s
[dropbear]
port = ssh
logpath = %(dropbear_log)s
backend = %(dropbear_backend)s
[selinux-ssh]
port = ssh
logpath = %(auditd_log)s
#
# HTTP servers
#
[apache-auth]
port = http,https
logpath = %(apache_error_log)s
[apache-badbots]
# Ban hosts which agent identifies spammer robots crawling the web
# for email addresses. The mail outputs are buffered.
port = http,https
logpath = %(apache_access_log)s
bantime = 48h
maxretry = 1
[apache-noscript]
port = http,https
logpath = %(apache_error_log)s
[apache-overflows]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-nohome]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
# --- HTTP server jails (Apache, nginx, PHP, lighttpd) and web apps --------
# Values like %(apache_error_log)s are INI interpolations resolved from
# fail2ban's bundled path definitions (paths-*.conf).
[apache-botsearch]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-fakegooglebot]
port = http,https
logpath = %(apache_access_log)s
maxretry = 1
ignorecommand = %(fail2ban_confpath)s/filter.d/ignorecommands/apache-fakegooglebot <ip>
[apache-modsecurity]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-shellshock]
port = http,https
logpath = %(apache_error_log)s
maxretry = 1
[openhab-auth]
filter = openhab
banaction = %(banaction_allports)s
logpath = /opt/openhab/logs/request.log
# To use more aggressive http-auth modes set filter parameter "mode" in jail.local:
# normal (default), aggressive (combines all), auth or fallback
# See "tests/files/logs/nginx-http-auth" or "filter.d/nginx-http-auth.conf" for usage example and details.
[nginx-http-auth]
# mode = normal
port = http,https
logpath = %(nginx_error_log)s
# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module`
# and define `limit_req` and `limit_req_zone` as described in nginx documentation
# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html
# or for example see in 'config/filter.d/nginx-limit-req.conf'
[nginx-limit-req]
port = http,https
logpath = %(nginx_error_log)s
[nginx-botsearch]
port = http,https
logpath = %(nginx_error_log)s
[nginx-bad-request]
port = http,https
logpath = %(nginx_access_log)s
# Ban attackers that try to use PHP's URL-fopen() functionality
# through GET/POST variables. - Experimental, with more than a year
# of usage in production environments.
[php-url-fopen]
port = http,https
logpath = %(nginx_access_log)s
          %(apache_access_log)s
[suhosin]
port = http,https
logpath = %(suhosin_log)s
[lighttpd-auth]
# Same as above for Apache's mod_auth
# It catches wrong authentications
port = http,https
logpath = %(lighttpd_error_log)s
#
# Webmail and groupware servers
#
[roundcube-auth]
port = http,https
logpath = %(roundcube_errors_log)s
# Use following line in your jail.local if roundcube logs to journal.
#backend = %(syslog_backend)s
[openwebmail]
port = http,https
logpath = /var/log/openwebmail.log
[horde]
port = http,https
logpath = /var/log/horde/horde.log
[groupoffice]
port = http,https
logpath = /home/groupoffice/log/info.log
[sogo-auth]
# Monitor SOGo groupware server
# without proxy this would be:
# port = 20000
port = http,https
logpath = /var/log/sogo/sogo.log
[tine20]
logpath = /var/log/tine20/tine20.log
port = http,https
#
# Web Applications
#
#
[drupal-auth]
port = http,https
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
[guacamole]
port = http,https
logpath = /var/log/tomcat*/catalina.out
#logpath = /var/log/guacamole.log
[monit]
#Ban clients brute-forcing the monit gui login
port = 2812
logpath = /var/log/monit
          /var/log/monit.log
[webmin-auth]
port = 10000
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[froxlor-auth]
port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
#
# HTTP Proxy servers
#
#
[squid]
port = 80,443,3128,8080
logpath = /var/log/squid/access.log
[3proxy]
port = 3128
logpath = /var/log/3proxy.log
# --- FTP and mail-server jails --------------------------------------------
# Note: none of these are active unless "enabled = true" is set here or in
# a jail.local override.
#
# FTP servers
#
[proftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(proftpd_log)s
backend = %(proftpd_backend)s
[pure-ftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(pureftpd_log)s
backend = %(pureftpd_backend)s
[gssftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
[wuftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(wuftpd_log)s
backend = %(wuftpd_backend)s
[vsftpd]
# or overwrite it in jails.local to be
# logpath = %(syslog_authpriv)s
# if you want to rely on PAM failed login attempts
# vsftpd's failregex should match both of those formats
port = ftp,ftp-data,ftps,ftps-data
logpath = %(vsftpd_log)s
#
# Mail servers
#
# ASSP SMTP Proxy Jail
[assp]
port = smtp,465,submission
logpath = /root/path/to/assp/logs/maillog.txt
[courier-smtp]
port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[postfix]
# To use another modes set filter parameter "mode" in jail.local:
mode = more
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s
[postfix-rbl]
filter = postfix[mode=rbl]
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s
maxretry = 1
[sendmail-auth]
port = submission,465,smtp
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[sendmail-reject]
# To use more aggressive modes set filter parameter "mode" in jail.local:
# normal (default), extra or aggressive
# See "tests/files/logs/sendmail-reject" or "filter.d/sendmail-reject.conf" for usage example and details.
#mode = normal
port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[qmail-rbl]
filter = qmail
port = smtp,465,submission
logpath = /service/qmail/log/main/current
# dovecot defaults to logging to the mail syslog facility
# but can be set by syslog_facility in the dovecot configuration.
[dovecot]
port = pop3,pop3s,imap,imaps,submission,465,sieve
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s
[sieve]
port = smtp,465,submission
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s
[solid-pop3d]
port = pop3,pop3s
logpath = %(solidpop3d_log)s
[exim]
# see filter.d/exim.conf for further modes supported from filter:
#mode = normal
port = smtp,465,submission
logpath = %(exim_main_log)s
[exim-spam]
port = smtp,465,submission
logpath = %(exim_main_log)s
[kerio]
port = imap,smtp,imaps,465
logpath = /opt/kerio/mailserver/store/logs/security.log
#
# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
# all relevant ports get banned
#
[courier-auth]
port = smtp,465,submission,imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[postfix-sasl]
filter = postfix[mode=auth]
port = smtp,465,submission,imap,imaps,pop3,pop3s
# You might consider monitoring /var/log/mail.warn instead if you are
# running postfix since it would provide the same log lines at the
# "warn" level but overall at the smaller filesize.
logpath = %(postfix_log)s
backend = %(postfix_backend)s
[perdition]
port = imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[squirrelmail]
port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks
logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log
[cyrus-imap]
port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[uwimap-auth]
port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
# --- DNS servers, telephony, IRC and database jails -----------------------
#
#
# DNS servers
#
# !!! WARNING !!!
# Since UDP is connection-less protocol, spoofing of IP and imitation
# of illegal actions is way too simple. Thus enabling of this filter
# might provide an easy way for implementing a DoS against a chosen
# victim. See
# http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
# Please DO NOT USE this jail unless you know what you are doing.
#
# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks UDP traffic for DNS requests.
# [named-refused-udp]
#
# filter = named-refused
# port = domain,953
# protocol = udp
# logpath = /var/log/named/security.log
# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks TCP traffic for DNS requests.
[named-refused]
port = domain,953
logpath = /var/log/named/security.log
[nsd]
port = 53
action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
          %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
logpath = /var/log/nsd.log
#
# Miscellaneous
#
[asterisk]
port = 5060,5061
action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
          %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
logpath = /var/log/asterisk/messages
maxretry = 10
[freeswitch]
port = 5060,5061
action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
          %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
logpath = /var/log/freeswitch.log
maxretry = 10
# enable adminlog; it will log to a file inside znc's directory by default.
[znc-adminlog]
port = 6667
logpath = /var/lib/znc/moddata/adminlog/znc.log
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld] or
# equivalent section:
# log-warnings = 2
#
# for syslog (daemon facility)
# [mysqld_safe]
# syslog
#
# for own logfile
# [mysqld]
# log-error=/var/log/mysqld.log
[mysqld-auth]
port = 3306
logpath = %(mysql_log)s
backend = %(mysql_backend)s
[mssql-auth]
# Default configuration for Microsoft SQL Server for Linux
# See the 'mssql-conf' manpage how to change logpath or port
logpath = /var/opt/mssql/log/errorlog
port = 1433
filter = mssql-auth
# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf')
[mongodb-auth]
# change port when running with "--shardsvr" or "--configsvr" runtime operation
port = 27017
logpath = /var/log/mongodb/mongodb.log
# --- Long-term banning and remaining application jails ---------------------
# Jail for more extended banning of persistent abusers
# !!! WARNINGS !!!
# 1. Make sure that your loglevel specified in fail2ban.conf/.local
# is not at DEBUG level -- which might then cause fail2ban to fall into
# an infinite loop constantly feeding itself with non-informative lines
# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 648000 (7.5 days)
# to maintain entries for failed logins for sufficient amount of time
[recidive]
logpath = /var/log/fail2ban.log
banaction = %(banaction_allports)s
bantime = 1w
findtime = 1d
# Generic filter for PAM. Has to be used with action which bans all
# ports such as iptables-allports, shorewall
[pam-generic]
# pam-generic filter can be customized to monitor specific subset of 'tty's
banaction = %(banaction_allports)s
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[xinetd-fail]
banaction = iptables-multiport-log
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
maxretry = 2
# stunnel - need to set port for this
[stunnel]
logpath = /var/log/stunnel4/stunnel.log
[ejabberd-auth]
port = 5222
logpath = /var/log/ejabberd/ejabberd.log
[counter-strike]
logpath = /opt/cstrike/logs/L[0-9]*.log
tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039
udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015
action_ = %(default/action_)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp"]
          %(default/action_)s[name=%(__name__)s-udp, port="%(udpport)s", protocol="udp"]
[softethervpn]
port = 500,4500
protocol = udp
logpath = /usr/local/vpnserver/security_log/*/sec.log
[gitlab]
port = http,https
logpath = /var/log/gitlab/gitlab-rails/application.log
[grafana]
port = http,https
logpath = /var/log/grafana/grafana.log
[bitwarden]
port = http,https
logpath = /home/*/bwdata/logs/identity/Identity/log.txt
[centreon]
port = http,https
logpath = /var/log/centreon/login.log
# consider low maxretry and a long bantime
# nobody except your own Nagios server should ever probe nrpe
[nagios]
logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility
backend = %(syslog_backend)s
maxretry = 1
[oracleims]
# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above
logpath = /opt/sun/comms/messaging64/log/mail.log_current
banaction = %(banaction_allports)s
[directadmin]
logpath = /var/log/directadmin/login.log
port = 2222
[portsentry]
logpath = /var/lib/portsentry/portsentry.history
maxretry = 1
[pass2allow-ftp]
# this pass2allow example allows FTP traffic after successful HTTP authentication
port = ftp,ftp-data,ftps,ftps-data
# knocking_url variable must be overridden to some secret value in jail.local
knocking_url = /knocking/
filter = apache-pass[knocking_url="%(knocking_url)s"]
# access log of the website with HTTP auth
logpath = %(apache_access_log)s
blocktype = RETURN
returntype = DROP
action = %(action_)s[blocktype=%(blocktype)s, returntype=%(returntype)s,
         actionstart_on_demand=false, actionrepair_on_unban=true]
bantime = 1h
maxretry = 1
findtime = 1
[murmur]
# AKA mumble-server
port = 64738
action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
          %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
logpath = /var/log/mumble-server/mumble-server.log
[screensharingd]
# For Mac OS Screen Sharing Service (VNC)
logpath = /var/log/system.log
logencoding = utf-8
[haproxy-http-auth]
# HAProxy by default doesn't log to file you'll need to set it up to forward
# logs to a syslog server which would then write them to disk.
# See "haproxy-http-auth" filter for a brief cautionary note when setting
# maxretry and findtime.
logpath = /var/log/haproxy.log
[slapd]
port = ldap,ldaps
logpath = /var/log/slapd.log
[domino-smtp]
port = smtp,ssmtp
logpath = /home/domino01/data/IBM_TECHNICAL_SUPPORT/console.log
[phpmyadmin-syslog]
port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[zoneminder]
# Zoneminder HTTP/HTTPS web interface auth
# Logs auth failures to apache2 error log
port = http,https
logpath = %(apache_error_log)s
[traefik-auth]
# to use 'traefik-auth' filter you have to configure your Traefik instance,
# see `filter.d/traefik-auth.conf` for details and service example.
port = http,https
logpath = /var/log/traefik/access.log
[scanlogd]
logpath = %(syslog_local0)s
banaction = %(banaction_allports)s
[monitorix]
port = 8080
logpath = /var/log/monitorix-httpd

View File

@ -1,24 +0,0 @@
# OpenSSH daemon configuration, deployed to every host by the common role.
# sshd uses the FIRST value it reads for each keyword, so the drop-ins
# included here take precedence over the settings below.
Include /etc/ssh/sshd_config.d/*.conf

Port 22

# Root logs in with a key only. Password auth is already off globally
# below; "prohibit-password" makes that explicit for the root account
# instead of the needlessly broad "yes".
PermitRootLogin prohibit-password
PubkeyAuthentication yes
PasswordAuthentication no
KbdInteractiveAuthentication no
UsePAM yes

AllowAgentForwarding yes
X11Forwarding no
PrintMotd no
PrintLastLog yes

# Probe clients every 300s and drop the session after one missed reply,
# so dead connections are reaped after ~5 minutes.
TCPKeepAlive yes
ClientAliveInterval 300
ClientAliveCountMax 1

# Allow client to pass locale environment variables
AcceptEnv LANG LC_*

# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server

View File

@ -1,123 +0,0 @@
---
# Baseline host setup: hostname, base packages, Docker with a
# docker-compose systemd template unit, hardened SSH, UFW and fail2ban.
# FQCN module names, `true` booleans and quoted modes keep the repo's
# pre-commit gates (yamllint --strict, ansible-lint) happy.

# set hostname
- name: Set a hostname specifying strategy
  ansible.builtin.hostname:
    name: "{{ inventory_hostname }}"
    use: systemd

# docker
- name: Install dependencies
  ansible.builtin.apt:
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg-agent
      - software-properties-common
      - sudo
      - systemd-timesyncd
    state: latest
    update_cache: true

- name: Update and upgrade apt packages
  become: true
  ansible.builtin.apt:
    upgrade: "yes"
    update_cache: true
    cache_valid_time: 86400  # one day

- name: Enable systemd-timesyncd
  ansible.builtin.systemd_service:
    name: systemd-timesyncd
    state: restarted
    enabled: true
    daemon_reload: true

# ntpd conflicts with systemd-timesyncd; keep only the latter
- name: Purge ntp
  ansible.builtin.apt:
    name:
      - ntp
    state: absent

- name: Docker GPG key
  ansible.builtin.apt_key:
    url: https://download.docker.com/linux/debian/gpg
    state: present

- name: Repository docker
  ansible.builtin.apt_repository:
    repo: deb https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
    state: present

- name: Install docker
  ansible.builtin.apt:
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
    state: latest
    update_cache: true

- name: Enable docker
  ansible.builtin.systemd_service:
    name: docker
    state: restarted
    enabled: true
    daemon_reload: true

# template unit: `docker-compose@<app>` runs the compose project that the
# app roles drop into /etc/docker/compose/<app>
- name: Copy docker-compose@.service
  ansible.builtin.copy:
    src: ../files/docker-compose@.service
    dest: /etc/systemd/system/docker-compose@.service
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Ensure /etc/docker/compose exists
  ansible.builtin.file:
    path: /etc/docker/compose
    state: directory
    owner: root
    group: root
    mode: "0700"

# SSH
- name: Copy sshd_config
  ansible.builtin.copy:
    src: ../files/sshd_config
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Restart sshd
  ansible.builtin.service:
    name: sshd
    state: restarted
    enabled: true

# FIREWALL
- name: Install UFW
  ansible.builtin.apt:
    name: ufw
    state: latest

- name: Allow ssh from everywhere and enable
  community.general.ufw:
    rule: allow
    name: OpenSSH
    state: enabled

- name: Restart ufw
  ansible.builtin.service:
    name: ufw
    state: restarted
    enabled: true

# FAIL2BAN
- name: Install fail2ban
  ansible.builtin.apt:
    name: fail2ban
    state: latest

- name: Copy jail.conf
  ansible.builtin.copy:
    src: ../files/jail.conf
    dest: /etc/fail2ban/jail.conf
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Restart fail2ban
  ansible.builtin.service:
    name: fail2ban
    state: restarted
    enabled: true

View File

@ -1,12 +0,0 @@
// Global BIND options shared by every DNS host in this deployment:
// authoritative-only (no recursion), answer anyone, refuse zone transfers.
// NOTE(review): allow-transfer { none; } is global -- if replicas pull
// zones via AXFR, per-zone statements in named.conf.local must re-allow
// transfers; confirm against the named.conf.local templates.
options {
        directory "/var/cache/bind";
        recursion no;
        allow-transfer { none; };
        allow-query { any; };
        auth-nxdomain no; # conform to RFC1035
        listen-on-v6 { any; };
};

View File

@ -1,30 +0,0 @@
---
# Install BIND, apply the shared authoritative-only options and open
# port 53 (tcp+udp) to the world.
- name: Install BIND
  ansible.builtin.apt:
    name: bind9
    state: latest

- name: Install BIND-utils
  ansible.builtin.apt:
    name: bind9-utils
    state: latest

- name: Copy named.conf.options
  ansible.builtin.copy:
    src: ../files/named.conf.options
    dest: /etc/bind/named.conf.options
    owner: bind
    group: bind
    mode: "0644"

- name: Restart & enable BIND
  ansible.builtin.service:
    name: named
    state: restarted
    enabled: true

- name: Allow dns from everywhere via udp
  community.general.ufw:
    rule: allow
    port: "53"
    proto: udp

- name: Allow dns from everywhere via tcp
  community.general.ufw:
    rule: allow
    port: "53"
    proto: tcp

- name: Restart ufw
  ansible.builtin.service:
    name: ufw
    state: restarted
    enabled: true

View File

@ -1,22 +0,0 @@
---
# Render the Drone CI compose project and (re)start it through the
# docker-compose@ systemd template unit.
- name: Ensure drone docker/compose exist
  ansible.builtin.file:
    path: /etc/docker/compose/drone
    state: directory
    owner: root
    group: root
    mode: "0700"

- name: Build drone docker-compose.yml.j2
  ansible.builtin.template:
    src: ../templates/docker-compose.yml.j2
    dest: /etc/docker/compose/drone/docker-compose.yml
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Daemon-reload and enable drone
  ansible.builtin.systemd_service:
    state: restarted
    enabled: true
    name: docker-compose@drone

View File

@ -1,29 +0,0 @@
# Drone CI: server plus a docker runner sharing an RPC secret.
version: '3'
services:
  drone:
    container_name: drone
    image: drone/drone:latest
    volumes:
      - ./drone:/data
    ports:
      # bound to loopback only; NOTE(review): presumably a reverse proxy on
      # the host terminates TLS for drone.internal.simponic.xyz -- confirm.
      - "127.0.0.1:2201:80"
    environment:
      - DRONE_GITEA_SERVER=https://git.simponic.xyz
      - DRONE_GITEA_CLIENT_ID={{ drone_gitea_client_id }}
      - DRONE_GITEA_CLIENT_SECRET={{ drone_gitea_client_secret }}
      - DRONE_GIT_ALWAYS_AUTH=true
      - DRONE_SERVER_PROTO=https
      - DRONE_SERVER_HOST=drone.internal.simponic.xyz
      - DRONE_RPC_SECRET={{ drone_rpc_secret }}
  drone-runner:
    container_name: drone_runner
    image: drone/drone-runner-docker:latest
    userns_mode: 'host' # Needed to get access to docker socket
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      # must match the server's DRONE_RPC_SECRET above
      - DRONE_RPC_SECRET={{ drone_rpc_secret }}
      - DRONE_RPC_HOST=drone:80
      - DRONE_RPC_PROTO=http
      - DRONE_RUNNER_CAPACITY=4

View File

@ -1 +0,0 @@
app.ini

View File

@ -1,46 +0,0 @@
---
# Lay out the Gitea compose project: data directories owned by uid/gid
# 1000 (presumably the container's git user -- TODO confirm against the
# gitea image docs), the app.ini config, and the compose unit.
- name: Ensure gitea docker/compose exist
  ansible.builtin.file:
    path: /etc/docker/compose/gitea
    state: directory
    owner: root
    group: root
    mode: "0700"

- name: Create gitea docker/compose/data/gitea with set uid/gid
  ansible.builtin.file:
    path: /etc/docker/compose/gitea/data
    state: directory
    owner: "1000"
    group: "1000"
    mode: "0700"

- name: Ensure gitea docker/compose/data/gitea exist
  ansible.builtin.file:
    path: /etc/docker/compose/gitea/data/gitea/conf
    state: directory
    owner: "1000"
    group: "1000"
    mode: "0700"

- name: Copy app.ini
  ansible.builtin.copy:
    src: ../files/app.ini
    dest: /etc/docker/compose/gitea/data/gitea/conf/app.ini
    owner: "1000"
    group: "1000"
    mode: "0700"

- name: Build gitea docker-compose.yml.j2
  ansible.builtin.template:
    src: ../templates/docker-compose.yml.j2
    dest: /etc/docker/compose/gitea/docker-compose.yml
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Daemon-reload and enable gitea
  ansible.builtin.systemd_service:
    state: restarted
    enabled: true
    name: docker-compose@gitea

View File

@ -1,22 +0,0 @@
# Gitea: web UI on loopback (port 9966), git-over-ssh on the VPN address.
version: "3"
networks:
  gitea:
    external: false
services:
  server:
    image: gitea/gitea:latest
    container_name: gitea
    restart: always
    networks:
      - gitea
    volumes:
      - ./data:/data
      # mirror the host's timezone inside the container
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      # 3000 (web) only on loopback; 22 (ssh) only on {{ nijika_ip }}:222
      - "127.0.0.1:9966:3000"
      - "{{ nijika_ip }}:222:22"
    dns:
      - {{ nameserver_ip }}

View File

@ -1 +0,0 @@
wireguard.cfg

View File

@ -1,39 +0,0 @@
---
# Wireguard endpoint for the "hatecomputers" tunnel, with IP forwarding
# enabled so the host can route for peers.
- name: Install wireguard
  ansible.builtin.apt:
    name:
      - wireguard
    state: latest

# stop the unit before replacing the config so the later restart picks
# up the new file cleanly
- name: Stop wireguard and enable on boot
  ansible.builtin.systemd:
    name: wg-quick@hatecomputers
    enabled: true
    state: stopped

- name: Copy config
  ansible.builtin.copy:
    src: ../files/wireguard.cfg
    dest: /etc/wireguard/hatecomputers.conf
    owner: root
    group: root
    mode: "0600"

- name: Enable and persist ip forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    sysctl_set: true
    reload: true

- name: Start wireguard and enable on boot
  ansible.builtin.systemd:
    name: wg-quick@hatecomputers
    enabled: true
    state: restarted

- name: Allow wireguard endpoint ufw
  community.general.ufw:
    rule: allow
    port: "51820"
    proto: udp

View File

@ -1,28 +0,0 @@
---
# Deploy the LLDAP compose project and expose LDAP (3890) to the VPN only.
- name: Ensure lldap docker/compose exist
  ansible.builtin.file:
    path: /etc/docker/compose/lldap
    state: directory
    owner: root
    group: root
    mode: "0700"

- name: Build lldap docker-compose.yml.j2
  ansible.builtin.template:
    src: ../templates/docker-compose.yml.j2
    dest: /etc/docker/compose/lldap/docker-compose.yml
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Daemon-reload and enable lldap
  ansible.builtin.systemd_service:
    state: restarted
    enabled: true
    name: docker-compose@lldap

# 100.64.0.0/10 is the CGNAT range -- presumably the mesh VPN's address
# space (headscale); verify against the VPN configuration.
- name: Allow ldap on vpn
  community.general.ufw:
    rule: allow
    port: "3890"
    from: "100.64.0.0/10"

View File

@ -1,18 +0,0 @@
# LLDAP: lightweight LDAP server; LDAP on the VPN address, admin web UI
# on loopback only.
version: "3"
volumes:
  lldap_data:
    driver: local
services:
  lldap:
    image: lldap/lldap:stable
    ports:
      - "{{ johan_ip }}:3890:3890"
      - "127.0.0.1:17170:17170"
    volumes:
      - "lldap_data:/data"
    environment:
      # NOTE(review): compose does not strip quotes in `environment:` list
      # entries, so these values likely reach lldap with literal double
      # quotes around them. It is self-consistent as deployed -- confirm
      # before "fixing", since changing it would change the effective
      # secret and user password.
      - LLDAP_JWT_SECRET="{{ lldap_jwt_secret }}"
      - LLDAP_LDAP_USER_PASS="{{ lldap_user_pass }}"
      - LLDAP_LDAP_BASE_DN=dc=simponic,dc=xyz

View File

@ -1,3 +0,0 @@
# Postfix override dropped into docker-mailserver's config dir:
# virtual domains come from the vhost file, mailboxes from LDAP.
virtual_mailbox_domains = /etc/postfix/vhost
virtual_mailbox_maps = ldap:/etc/postfix/ldap-users.cf
# left empty on purpose: clears any alias maps set elsewhere
virtual_alias_maps =

View File

@ -1,71 +0,0 @@
---
# Obtain/renew the mail host's letsencrypt certificate and deploy the
# docker-mailserver compose project.
- name: Install letsencrypt
  ansible.builtin.apt:
    name: letsencrypt
    state: latest

- name: Allow 80/tcp ufw
  community.general.ufw:
    rule: allow
    port: "80"
    proto: tcp

- name: Allow 443/tcp ufw
  community.general.ufw:
    rule: allow
    port: "443"
    proto: tcp

- name: Restart ufw
  ansible.builtin.service:
    name: ufw
    state: restarted
    enabled: true

# NOTE: a folded scalar (>) already joins these lines with a space --
# the previous trailing "\" was passed to the shell as a stray
# escaped-space argument.
- name: Request certificate
  ansible.builtin.shell: >
    letsencrypt certonly -n --standalone -d "{{ domain }}"
    -m "{{ certbot_email }}" --agree-tos
  args:
    creates: "/etc/letsencrypt/live/{{ domain }}"

# runs once a month, on the 18th at 02:01
- name: Add monthly letsencrypt cronjob for cert renewal
  ansible.builtin.cron:
    name: "letsencrypt_renewal_mail"
    day: "18"
    hour: "2"
    minute: "1"
    job: "letsencrypt renew --cert-name {{ domain }} -n --standalone --agree-tos -m {{ certbot_email }}"

- name: Ensure mail docker/compose exist
  ansible.builtin.file:
    path: /etc/docker/compose/mail
    state: directory
    owner: root
    group: root
    mode: "0700"

- name: Ensure mail docker/compose volume exist
  ansible.builtin.file:
    path: /etc/docker/compose/mail/docker-data/dms/config
    state: directory
    owner: root
    group: root
    mode: "0700"

# https://github.com/docker-mailserver/docker-mailserver/issues/1562
- name: Ensure mail docker/compose ldap overrides exist
  ansible.builtin.copy:
    src: ../files/postmaster-main.cf
    dest: /etc/docker/compose/mail/docker-data/dms/config/postfix-main.cf

- name: Build mail docker-compose.yml.j2
  ansible.builtin.template:
    src: ../templates/docker-compose.yml.j2
    dest: /etc/docker/compose/mail/docker-compose.yml
    owner: root
    group: root
    mode: u=rw,g=r,o=r

- name: Daemon-reload and enable mail
  ansible.builtin.systemd_service:
    state: restarted
    enabled: true
    name: docker-compose@mail

View File

@ -1,50 +0,0 @@
# docker-mailserver with accounts provisioned from LLDAP over the VPN.
services:
  mailserver:
    image: ghcr.io/docker-mailserver/docker-mailserver:latest
    container_name: mailserver
    # Provide the FQDN of your mail server here (Your DNS MX record should point to this value)
    hostname: {{ domain }}
    ports:
      # 25 smtp / 465 smtps / 587 submission / 993 imaps / 4190 managesieve
      - "25:25"
      - "465:465"
      - "587:587"
      - "993:993"
      - "4190:4190"
    volumes:
      - ./docker-data/dms/mail-data/:/var/mail/
      - ./docker-data/dms/mail-state/:/var/mail-state/
      - ./docker-data/dms/mail-logs/:/var/log/mail/
      - ./docker-data/dms/config/:/tmp/docker-mailserver/
      # certs come from the host's letsencrypt tree (SSL_TYPE below)
      - /etc/letsencrypt:/etc/letsencrypt
      - /etc/localtime:/etc/localtime:ro
    environment:
      - SSL_TYPE=letsencrypt
      - ENABLE_CLAMAV=0
      - ENABLE_AMAVIS=0
      - ENABLE_MANAGESIEVE=1
      - ENABLE_FAIL2BAN=1
      - SPOOF_PROTECTION=1
      # accounts, groups, aliases and domains all resolve via LDAP
      - ACCOUNT_PROVISIONER=LDAP
      - LDAP_SERVER_HOST=ldap://lldap.internal.simponic.xyz:3890
      - LDAP_SEARCH_BASE=dc=simponic,dc=xyz
      - LDAP_BIND_DN=uid=admin,ou=people,dc=simponic,dc=xyz
      - LDAP_BIND_PW={{ lldap_admin_pass }}
      - LDAP_QUERY_FILTER_USER=(&(objectClass=mailAccount)(|(uid=%u)))
      - LDAP_QUERY_FILTER_GROUP=(&(cn=mail)(uniquemember=uid=%u,ou=people,dc=simponic,dc=xyz))
      - LDAP_QUERY_FILTER_ALIAS=(&(objectClass=inetOrgPerson)(|(uid=%u)(mail=%u)))
      - LDAP_QUERY_FILTER_DOMAIN=(mail=*@%s)
      - DOVECOT_AUTH_BIND=yes
      - DOVECOT_USER_FILTER=(&(objectClass=inetOrgPerson)(|(uid=%u)(mail=%u)))
      - DOVECOT_USER_ATTRS==uid=5000,=gid=5000,=home=/var/mail/%Ln,=mail=maildir:~/Maildir
      - ENABLE_SASLAUTHD=1
      - SASLAUTHD_MECHANISMS=rimap
      - SASLAUTHD_MECH_OPTIONS=127.0.0.1
      - POSTMASTER_ADDRESS={{ postmaster_email }}
    extra_hosts:
      # pin the LDAP hostname to its VPN address for the container
      - "lldap.internal.simponic.xyz:{{ johan_ip }}"
    dns:
      - {{ nameserver_ip }}
    restart: always

View File

@ -1,49 +0,0 @@
---
# Render named.conf.local + zone files on the primary, the replica config
# on replicas, then restart BIND everywhere.
## PRIMARY
- name: Create named.conf.local for primary
  ansible.builtin.template:
    src: ../templates/named.conf.local.primary.j2
    dest: /etc/bind/named.conf.local
    owner: bind
    group: bind
  when: inventory_hostname in groups['dnsprimary']

- name: Create /etc/bind/zones if not exist
  ansible.builtin.file:
    path: /etc/bind/zones
    state: directory
    owner: bind
    group: bind

- name: Create primary zone files for primary
  ansible.builtin.template:
    src: "../templates/db.{{ item.zone }}.j2"
    dest: "/etc/bind/zones/db.{{ item.zone }}"
    owner: bind
    group: bind
  with_items: "{{ dns_zones }}"
  when: inventory_hostname in groups['dnsprimary']

## REPLICA
- name: Create named.conf.local for replica
  ansible.builtin.template:
    src: ../templates/named.conf.local.replica.j2
    dest: /etc/bind/named.conf.local
    owner: bind
    group: bind
  when: inventory_hostname in groups['dnsreplica']

# with_fileglob expands on the CONTROL node, so the old flush task could
# never match the replica's files; find runs on the target host instead.
- name: Find cached zone files on replicas
  ansible.builtin.find:
    paths: /var/cache/bind
    patterns: "db.*"
  register: replica_zone_cache
  when: inventory_hostname in groups['dnsreplica']

- name: Flush dns cache on replicas
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: absent
  loop: "{{ replica_zone_cache.files | default([]) }}"
  when: inventory_hostname in groups['dnsreplica']

- name: Restart bind9
  ansible.builtin.service:
    name: bind9
    state: restarted
    enabled: true

View File

@ -1,15 +0,0 @@
; Zone template for rainrainra.in, rendered by the DNS playbook.
; NOTE(review): the Serial is hard-coded at 5 -- it must be bumped on
; every zone change or secondaries will keep serving stale data.
$TTL 604800
@ IN SOA {{ dns_primary_hostname }}.simponic.xyz. admin.simponic.xyz. (
        5 ; Serial
        604800 ; Refresh
        86400 ; Retry
        2419200 ; Expire
        604800 ) ; Negative Cache TTL
;
; Name servers
rainrainra.in. IN NS {{ dns_primary_hostname }}.simponic.xyz.
rainrainra.in. IN NS {{ dns_replica_hostname }}.simponic.xyz.
; Other A records
@ IN A 23.95.214.176

View File

@ -1,16 +0,0 @@
; Zone template for rileyandlizzy.wedding, rendered by the DNS playbook.
; NOTE(review): the Serial is hard-coded at 5 -- it must be bumped on
; every zone change or secondaries will keep serving stale data.
$TTL 604800
@ IN SOA {{ dns_primary_hostname }}.simponic.xyz. admin.simponic.xyz. (
        5 ; Serial
        604800 ; Refresh
        86400 ; Retry
        2419200 ; Expire
        604800 ) ; Negative Cache TTL
;
; Name servers
rileyandlizzy.wedding. IN NS {{ dns_primary_hostname }}.simponic.xyz.
rileyandlizzy.wedding. IN NS {{ dns_replica_hostname }}.simponic.xyz.
; Other A records
@ IN A 129.123.76.14
www IN A 129.123.76.14

Some files were not shown because too many files have changed in this diff Show More