Compare commits


124 Commits
dev ... main

Author SHA1 Message Date
Alpha Chen bbe328132b
ubuntu
3 weeks ago
Alpha Chen dd58ffddd7
linux
4 weeks ago
Alpha Chen a43f7d9f6a
authelia 4.38.10
4 weeks ago
Alpha Chen a0b618624c
bump versions
4 weeks ago
Alpha Chen e1b216cf2c
rm ddclient
2 months ago
Alpha Chen 93f50da427
update paperless-ngx
2 months ago
Alpha Chen b75c5dbe62
bump miniflux and authelia
2 months ago
Alpha Chen bd9cb4f977
smokeping, pi-hole
3 months ago
Alpha Chen 30c77065ee
bump stract weight in searxng
3 months ago
Alpha Chen 7decc855ba
caddy
3 months ago
Alpha Chen e37cbca553
homebrew
3 months ago
Alpha Chen a75f3de22b
paperless-ngx
4 months ago
Alpha Chen 89351b442b
defaults
4 months ago
Alpha Chen f06f1d7eb2
paperless-ngx -> 2.8.3
4 months ago
Alpha Chen cdcc07e0bd
authelia -> 4.38.8
5 months ago
Alpha Chen 543d9bfde4
paperless-ngx -> 2.7.2
5 months ago
Alpha Chen d9b06d09cb
miniflux -> 2.1.3
5 months ago
Alpha Chen c03165db53
carve out for current paperless url
5 months ago
Alpha Chen b4a9123c87
mu
5 months ago
Alpha Chen d277f4a18e
move paperless domain
5 months ago
Alpha Chen 080154b530
searxng
6 months ago
Alpha Chen 7eb7115bf9
fastmail terraform module
6 months ago
Alpha Chen 990fe523b3
[searxng] disable http2
6 months ago
Alpha Chen b825a76c10
mu
6 months ago
Alpha Chen c82bfb319b
mu
6 months ago
Alpha Chen 9cfa402f2f
searxng autocomplete
6 months ago
Alpha Chen 1e2cfdedae
bump miniflux and authelia
6 months ago
Alpha Chen 27e7ede3fe
mu
6 months ago
Alpha Chen b534bf4dc0
pam-reattach is now in core
6 months ago
Alpha Chen 4dd503b2a5
Authelia v4.38
6 months ago
Alpha Chen dfbb676841
version bumping
6 months ago
Alpha Chen 05cc953e0d
cap docker logs
6 months ago
Alpha Chen d1b3877949
enable stract in searxng
6 months ago
Alpha Chen 44d147db75
paperless-ngx v2.6.2
6 months ago
Alpha Chen 42c73be4cb
rename paperless back to paperless-ngx
7 months ago
Alpha Chen 90467023fd
miniflux -> 2.1.0
7 months ago
Alpha Chen 3ba89ef569
remove brave as an engine in searxng
7 months ago
Alpha Chen ce5d153545
switch from fasd to zoxide
7 months ago
Alpha Chen a48e272418
paperless -> 2.5.3
7 months ago
Alpha Chen 749f00b624
bump paperless version
7 months ago
Alpha Chen 66251ff393
fix typo for loki retention
7 months ago
Alpha Chen cfcec1d3ba
add retention to loki
7 months ago
Alpha Chen 3aded633d5
use an actual user:pass for parseable
7 months ago
Alpha Chen 4495790679
oops more paperless
8 months ago
Alpha Chen 16d6b48f5b
more logging
8 months ago
Alpha Chen d5eadcc4db
s/paperless-ngx/paperless/
8 months ago
Alpha Chen 9c1c82017a
logs
8 months ago
Alpha Chen 995ed3504b
removed commented out env vars for searxng
8 months ago
Alpha Chen b3e63a6694
mu
8 months ago
Alpha Chen 42c4cc6bc7
mu
8 months ago
Alpha Chen 9d617c8686
auth
8 months ago
Alpha Chen af19f7746c
paperless-ngx o11y
8 months ago
Alpha Chen 26e7a4981d
PAPERLESS_WEBSERVER_WORKERS=1
8 months ago
Alpha Chen 4239cce3fd
paperless-ngx
8 months ago
Alpha Chen 099bc705d9
redis
8 months ago
Alpha Chen b813c0ce73
mailmate
8 months ago
Alpha Chen 52f5047773
redis
8 months ago
Alpha Chen b0f6f42546
showWindowTitlebarIcons
8 months ago
Alpha Chen 46e35eea82
mu
8 months ago
Alpha Chen 43bf24f5a8
dev
8 months ago
Alpha Chen 3d424223c9
mailmate
8 months ago
Alpha Chen c159169cf9
miniflux v2.0.51
9 months ago
Alpha Chen da44c93ee9
calibre-web
10 months ago
Alpha Chen 52e6f208bf
bump pi-hole
10 months ago
Alpha Chen b13ead1a8f
lotus-land-story subdomains
10 months ago
Alpha Chen a4c3ff8c7b
set ttl to 65
10 months ago
Alpha Chen a85a02009a
minifux v2.0.49 -> v2.0.50
10 months ago
Alpha Chen 14b2718b5c
lint macos playbook
10 months ago
Alpha Chen 75b1eed026
install symbols-only nerd font
10 months ago
Alpha Chen 44b3936107
ingest akkoma metrics
10 months ago
Alpha Chen c70875cbdc
pihole:2023.10.0
11 months ago
Alpha Chen f70e7b7eab
miniflux v2.0.49
11 months ago
Alpha Chen 71237d933e
kill woodpecker and firefly iii
11 months ago
Alpha Chen 4bfc0c33bb
mu
12 months ago
Alpha Chen 85ffb4e1b2
miniflux 🤝 authelia
1 year ago
Alpha Chen c703a98e74
use authelia for gitea
1 year ago
Alpha Chen 0bdd9c40b2
authelia
1 year ago
Alpha Chen 073ead5852
mu
1 year ago
Alpha Chen 920f1f520e
mu
1 year ago
Alpha Chen 327b9252e9
resize lotus-land-story, tweak ufw
1 year ago
Alpha Chen 8ab3b4fee7
firefly iii
1 year ago
Alpha Chen 5808ed502f
mu
1 year ago
Alpha Chen 7d1b92a9da
mu
1 year ago
Alpha Chen dbe8bd5f14
mu
1 year ago
Alpha Chen c710ca84aa
only allow non-web traffic over tailscale
1 year ago
Alpha Chen 5484308a72
expose rsyslog
1 year ago
Alpha Chen 99b80cc8d8
promtail <3 rsyslog
1 year ago
Alpha Chen 4b9840049b
mu
1 year ago
Alpha Chen 45a36897a7
fix terminfo for tmux-256color
1 year ago
Alpha Chen b811157d18
mu
1 year ago
Alpha Chen 2b718e4d49
Use tsdb for loki, set retention for prometheus
1 year ago
Alpha Chen 70d83c691e
add mounts to node-exporter
1 year ago
Alpha Chen 858770829f
cleanup
1 year ago
Alpha Chen 223020f5cd
woodpecker
1 year ago
Alpha Chen cc53708789
golink
1 year ago
Alpha Chen 38c023e314
Populate network from docker itself
1 year ago
Alpha Chen 2f3fb21ab6
Bump miniflux, add miniflux to prometheus
1 year ago
Alpha Chen 74a8c5522e
pin pihole version
1 year ago
Alpha Chen 6d20005806
tailscale
1 year ago
Alpha Chen 82e7d01f9f
mu
1 year ago
Alpha Chen 012f7cd6cf
enable backups
1 year ago
Alpha Chen 2ce25ecc08
use ansible instead of docker compose
1 year ago
Alpha Chen 58c05cbe00
logging
1 year ago
Alpha Chen 7f6c7eb9a1
Split up main.yml into multiple playbooks
1 year ago
Alpha Chen 7690c4596d
o11y
1 year ago
Alpha Chen 282c92fa68
miniflux db password
1 year ago
Alpha Chen 3ba47f978c
pip
1 year ago
Alpha Chen ee54fd71aa
mu
1 year ago
Alpha Chen c005dfa87f
tweak secrets management
1 year ago
Alpha Chen e5a2d3b7bb
no count
1 year ago
Alpha Chen e87263d962
reverse proxy w/caddy
1 year ago
Alpha Chen 40568732df
mu
1 year ago
Alpha Chen 154b2dd4d9
miniflux running (without ssl)
1 year ago
Alpha Chen 4af3162205
lotus-land-story initial setup
1 year ago
Alpha Chen fd12c3aaaa
ignore host_vars
1 year ago
Alpha Chen 773aa0cb62
terraform dns
2 years ago
Alpha Chen 282be93943
ramble-hard tailscale
2 years ago
Alpha Chen 0557631cd7
rm lets_encrypt
2 years ago
Alpha Chen 60955c1789
ramble-hard/lets_encrypt
2 years ago
Alpha Chen 7a78a84621
on-fire-within
2 years ago
Alpha Chen 91cf63398e
ramble-hard
2 years ago
Alpha Chen 58b65320e2
[on-fire-within] bump traefik
2 years ago
Alpha Chen 138dd0e3f1
on-fire-within
2 years ago
Alpha Chen e5b090c19f
mu
2 years ago

@@ -0,0 +1,2 @@
exclude_paths:
- .terraform/

.gitignore (vendored): 7 lines changed

@@ -0,0 +1,7 @@
.terraform
terraform.tfstate*
terraform.tfvars
domain_records.tf
host_vars

@@ -0,0 +1,24 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/linode/linode" {
version = "2.18.0"
constraints = "2.18.0"
hashes = [
"h1:CeGbgARw4BrgUrEfPVRUjwoPI5WrCtA4bG39ckFHrlQ=",
"zh:03bc6644991818395759757cc5a3f07b6a8f7f797f04d701441e79e85cb9dc69",
"zh:5248ed1626e1531616fd227840c7734b8f2d13631427d80672b615e60f106a2d",
"zh:80a890b44a66639b1dabc2773e5dbb5f22a8e73495ce167a5064296463e17cdc",
"zh:874f5794a567250fc873af73c8c67a1980eb30de864ef76eb772ae492cf530ba",
"zh:94469b62cc27ce53fcd06a48b84de256136576699602ba68d157b2ad033ac0ed",
"zh:a1b9096702e1ee197f84634159d512b54d9223d00ff850ff4073eb9e5ac5eb9d",
"zh:a39c77dbf770a349529596124d4188fc7eeb8ecaf88ebc06f586d884f68df550",
"zh:bd5ee9da38d1846edc6af87771ede950bb703d99f78756c05da00fc0ca325be5",
"zh:c4b1d74d8d44ed258ca96a2b76d0101858672624a8d9731df8a97b8373dba728",
"zh:c69db2d6e4d7b04f9bc2f00223e61ae12127946043b07bca567fa3883bf5c071",
"zh:e818b9ea84e55ce1d2eb1f8852ec6606ce5d3691f356ea4eb3a701e5986ed339",
"zh:eeb7e6d518f62d6f02b497dbb5427a7a8ea38e2e5dddc8a114f7989a7a8c47f7",
"zh:fa77abf56e34701cdc49799cc13c6298be184d7cb48de20ee8b30e93fb3735a0",
"zh:fc3e94acd6fb2ad6b526169896fc6821298ba394a4d181c9b2448341e2e0eb54",
]
}

@@ -0,0 +1,5 @@
# frozen_string_literal: true
source "https://rubygems.org"
gem "rake"

@@ -0,0 +1,13 @@
GEM
remote: https://rubygems.org/
specs:
rake (13.0.6)
PLATFORMS
arm64-darwin-22
DEPENDENCIES
rake
BUNDLED WITH
2.4.1

@@ -1,5 +1,5 @@
[defaults]
inventory = hosts.yml
inventory = hosts.yml,hosts.local
# [privilege_escalation]
# become_ask_pass = True

@@ -1,4 +1,4 @@
- hosts: all
- hosts: os_MacOSX
tasks:
- name: set OS X defaults
@@ -8,242 +8,261 @@
type: "{{ item.type }}"
value: "{{ item.value }}"
notify: restart OS X system services
vars:
mailmate:
allowed_image_regexps:
- https://((i|images|d)\.gr-assets\.com
- www\.goodreads\.com
- massdrop-s3\.imgix\.net
- .*\.cloudfront\.net
- s3\.amazonaws\.com
- files\.convertkitcdn\.com/assets/pictures)/.*
with_items:
# NSGlobalDomain defaults
- # don't quit idle applications
domain: -g
key: NSDisableAutomaticTermination
type: bool
value: true
- # disable font smoothing
key: AppleFontSmoothing
type: int
value: 0
- # full keyboard access
key: AppleKeyboardUIMode
type: int
value: 3
- # show all extensions by default
key: AppleShowAllExtensions
type: bool
value: true
- # keyboard repeat rate
key: KeyRepeat
type: int
value: 2
- # delay before keyboard repeat
key: InitialKeyRepeat
type: int
value: 25
- # set sidebar item size to small
key: NSTableViewDefaultSizeMode
type: int
value: 1
- # disable resume
key: NSQuitAlwaysKeepsWindows
type: bool
value: false
- # add debug menu in web views
key: WebKitDeveloperExtras
type: bool
value: true
- # tap to click
key: com.apple.mouse.tapBehavior
type: bool
value: true
- # only show scrollbars when scrolling
key: AppleShowScrollBars
type: string
value: WhenScrolling
# Safari
- # enable Debug menu in Safari
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
- # disable Java
domain: com.apple.Safari
key: com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabled
type: bool
value: false
- # disable Java
domain: com.apple.Safari
key: com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabledForLocalFiles
type: bool
value: false
- # disable dashboard
domain: com.apple.dashboard
key: mcx-disabled
type: bool
value: true
- # don't write .DS_Store to network volumes
domain: com.apple.desktopservices
key: DSDontWriteNetworkStores
type: bool
value: true
# Dock defaults
- # automatically hide and show the dock
domain: com.apple.dock
key: autohide
type: bool
value: true
- # minimize windows using the scale effect
domain: com.apple.dock
key: mineffect
type: string
value: scale
- # don't rearrange spaces
domain: com.apple.dock
key: mru-spaces
type: bool
value: false
- domain: com.apple.dock
key: orientation
type: string
value: left
- # set the bottom left hot corner to sleep the display
domain: com.apple.dock
key: wvous-bl-corner
type: int
value: 10
- # set the icon size to 36 pixels
domain: com.apple.dock
key: tilesize
type: int
value: 36
- # no dock delay
domain: com.apple.dock
key: autohide-delay
type: float
value: 0
# Trackpad settings
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: Clicking
type: int
value: 1
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadFourFingerVertSwipeGesture
type: int
value: 0
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerDrag
type: bool
value: true
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerHorizSwipeGesture
type: int
value: 0
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerVertSwipeGesture
type: int
value: 0
# Finder defaults
- # don't ask when changing file extension
domain: com.apple.finder
key: FXEnableExtensionChangeWarning
type: bool
value: false
- # default to list view
domain: com.apple.finder
key: FXPreferredViewStyle
type: string
value: Nlsv
- # enable text selection in QuickLook
domain: com.apple.finder
key: QLEnableTextSelection
type: bool
value: true
- # show full path in Finder
domain: com.apple.finder
key: _FXShowPosixPathInTitle
type: bool
value: true
- # remove the proxy icon hover delay
domain: com.apple.Finder
key: NSToolbarTitleViewRolloverDelay
type: float
value: 0
- # show the proxy icon and older titlebar
# https://twitter.com/chucker/status/1395843084383043584
domain: com.apple.Finder
key: NSWindowSupportsAutomaticInlineTitle
type: bool
value: false
- # set date format in menubar
domain: com.apple.menuextra.clock
key: DateFormat
type: string
value: h:mm
- # no window shadows when capturing windows
domain: com.apple.screencapture
key: disable-shadow
type: bool
value: true
- domain: com.apple.screencapture
key: location
type: string
value: "{{ ansible_env.HOME }}/Downloads"
- domain: com.apple.screensaver
key: askForPassword
type: int
value: 1
- domain: com.apple.Terminal
key: ShowLineMarks
type: bool
value: false
# Other applications
- domain: com.google.Chrome
key: AppleEnableSwipeNavigateWithScrolls
type: bool
value: false
- domain: org.vim.MacVim
key: MMLastWindowClosedBehavior
type: int
value: 2
- domain: org.vim.MacVim
key: MMUntitledWindow
type: int
value: 1
- domain: org.vim.MacVim
key: SUCheckAtStartup
type: int
value: 1
- domain: org.vim.MacVim
key: SUEnableAutomaticChecks
type: int
value: 1
- domain: com.freron.MailMate
key: MmAllowedImageURLRegexp
type: string
value: https://((i|images|d)\.gr-assets\.com|www\.goodreads\.com|massdrop-s3\.imgix\.net|.*\.cloudfront\.net|s3\.amazonaws\.com|files\.convertkitcdn\.com/assets/pictures)/.*
- domain: com.freron.MailMate
key: MmSendMessageDelayEnabled
type: bool
value: true
- domain: com.freron.MailMate
key: MmSendMessageDelay
type: int
value: 60
- # don't quit idle applications
domain: -g
key: NSDisableAutomaticTermination
type: bool
value: true
- # disable font smoothing
key: AppleFontSmoothing
type: int
value: 0
- # full keyboard access
key: AppleKeyboardUIMode
type: int
value: 3
- # show all extensions by default
key: AppleShowAllExtensions
type: bool
value: true
- # keyboard repeat rate
key: KeyRepeat
type: int
value: 2
- # delay before keyboard repeat
key: InitialKeyRepeat
type: int
value: 25
- # set sidebar item size to small
key: NSTableViewDefaultSizeMode
type: int
value: 1
- # disable resume
key: NSQuitAlwaysKeepsWindows
type: bool
value: false
- # add debug menu in web views
key: WebKitDeveloperExtras
type: bool
value: true
- # tap to click
key: com.apple.mouse.tapBehavior
type: bool
value: true
- # only show scrollbars when scrolling
key: AppleShowScrollBars
type: string
value: WhenScrolling
- # move windows by holding ctrl+cmd and dragging any part of the window
domain: -g
key: NSWindowShouldDragOnGesture
type: bool
value: true
# Safari
- # enable Debug menu in Safari
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
- # disable Java
domain: com.apple.Safari
key: com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabled
type: bool
value: false
- # disable Java
domain: com.apple.Safari
key: com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabledForLocalFiles
type: bool
value: false
- # disable dashboard
domain: com.apple.dashboard
key: mcx-disabled
type: bool
value: true
- # don't write .DS_Store to network volumes
domain: com.apple.desktopservices
key: DSDontWriteNetworkStores
type: bool
value: true
# Dock defaults
- # automatically hide and show the dock
domain: com.apple.dock
key: autohide
type: bool
value: true
- # minimize windows using the scale effect
domain: com.apple.dock
key: mineffect
type: string
value: scale
- # don't rearrange spaces
domain: com.apple.dock
key: mru-spaces
type: bool
value: false
- domain: com.apple.dock
key: orientation
type: string
value: left
- # set the bottom left hot corner to sleep the display
domain: com.apple.dock
key: wvous-bl-corner
type: int
value: 10
- # set the icon size to 36 pixels
domain: com.apple.dock
key: tilesize
type: int
value: 36
- # no dock delay
domain: com.apple.dock
key: autohide-delay
type: float
value: 0
# Trackpad settings
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: Clicking
type: int
value: 1
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadFourFingerVertSwipeGesture
type: int
value: 0
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerDrag
type: bool
value: true
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerHorizSwipeGesture
type: int
value: 0
- domain: com.apple.driver.AppleBluetoothMultitouch.trackpad
key: TrackpadThreeFingerVertSwipeGesture
type: int
value: 0
# Finder defaults
- # don't ask when changing file extension
domain: com.apple.finder
key: FXEnableExtensionChangeWarning
type: bool
value: false
- # default to list view
domain: com.apple.finder
key: FXPreferredViewStyle
type: string
value: Nlsv
- # enable text selection in QuickLook
domain: com.apple.finder
key: QLEnableTextSelection
type: bool
value: true
- # show full path in Finder
domain: com.apple.finder
key: _FXShowPosixPathInTitle
type: bool
value: true
- # remove the proxy icon hover delay
domain: com.apple.Finder
key: NSToolbarTitleViewRolloverDelay
type: float
value: 0
- # show the proxy icon and older titlebar
# https://twitter.com/chucker/status/1395843084383043584
domain: com.apple.Finder
key: NSWindowSupportsAutomaticInlineTitle
type: bool
value: false
- # show the proxy icon always
domain: com.apple.universalaccess
key: showWindowTitlebarIcons
type: bool
value: true
- # set date format in menubar
domain: com.apple.menuextra.clock
key: DateFormat
type: string
value: h:mm
- # no window shadows when capturing windows
domain: com.apple.screencapture
key: disable-shadow
type: bool
value: true
- domain: com.apple.screencapture
key: location
type: string
value: "{{ ansible_env.HOME }}/Downloads"
- domain: com.apple.screensaver
key: askForPassword
type: int
value: 1
- domain: com.apple.Terminal
key: ShowLineMarks
type: bool
value: false
# Other applications
- domain: com.google.Chrome
key: AppleEnableSwipeNavigateWithScrolls
type: bool
value: false
- domain: org.vim.MacVim
key: MMLastWindowClosedBehavior
type: int
value: 2
- domain: org.vim.MacVim
key: MMUntitledWindow
type: int
value: 1
- domain: org.vim.MacVim
key: SUCheckAtStartup
type: int
value: 1
- domain: org.vim.MacVim
key: SUEnableAutomaticChecks
type: int
value: 1
- domain: com.freron.MailMate
key: MmAllowedImageURLRegexp
type: string
value: "{{ mailmate.allowed_image_regexps | join('|') }}"
- domain: com.freron.MailMate
key: MmSendMessageDelayEnabled
type: bool
value: true
- domain: com.freron.MailMate
key: MmSendMessageDelay
type: int
value: 60
handlers:
@@ -253,3 +272,5 @@
- Finder
- Dock
- SystemUIServer
# vim: ft=yaml.ansible

@@ -1,20 +1,17 @@
- hosts: all
- name: Sync dotfiles
hosts: dev
tasks:
- name: List dotfiles
shell: ls -A ~/.dotfiles | grep '^\.'
register: ls_dotfiles
- name: List dotfiles # noqa: risky-shell-pipe
ansible.builtin.shell: ls -A ~/.dotfiles | grep '^\.'
register: ls_dotfiles
changed_when: false
- name: Symlink dotfiles
file:
src: ~/.dotfiles/{{ item }}
dest: ~/{{ item }}
state: link
loop: "{{ ls_dotfiles.stdout_lines | difference(['.git', '.gitmodules']) }}"
- name: Symlink dotfiles
ansible.builtin.file:
src: ~/.dotfiles/{{ item }}
dest: ~/{{ item }}
state: link
loop: "{{ ls_dotfiles.stdout_lines | difference(['.git', '.gitmodules', '.DS_Store']) }}"
- name: Symlink Prezto runcoms
file:
src: "{{ item }}"
dest: ~/.{{ item | basename }}
state: link
with_fileglob: "~/.zprezto/runcoms/z*"
# vim: ft=yaml.ansible

@@ -1,49 +1,68 @@
- hosts: all
- name: Install homebrew formulae
hosts: os_MacOSX
tasks:
- community.general.homebrew:
- name: Install Homebrew formulae
community.general.homebrew:
name: "{{ item }}"
loop:
- chruby
- difftastic
- direnv
- efm-langserver
- entr
- exa
- fasd
- fd
- fzf
- git
- git-lfs
- jq
- luarocks
- neovim
- ripgrep
- ruby-install
- shellcheck
- svn # required for source code pro
- tmux
- tree
- zsh
- aerospace
- chruby
- difftastic
- direnv
- docker
- docker-compose
- efm-langserver
- entr
- eza
- fd
- fzf
- git
- git-lfs
- jq
- kitty
- luarocks
- pam-reattach
- ripgrep
- ruby-install
- shellcheck
- tmux
- tree
- zoxide
- zsh
- fabianishere/personal/pam_reattach
- felixkratz/formulae/sketchybar
- community.general.homebrew_cask:
- name: Install Homebrew formulae from HEAD
community.general.homebrew:
name: "{{ item }}"
state: head
loop:
- alfred
- bartender
- dash
- fantastical
- firefox
- google-chrome
- hammerspoon
- mailmate
- obsidian
- slack
- topnotch
- zoom
- neovim
# - homebrew/cask-fonts/font-source-code-pro
- homebrew/cask-fonts/font-sauce-code-pro-nerd-font
- homebrew/cask-versions/firefox-developer-edition
- name: Install Homebrew casks
community.general.homebrew_cask:
name: "{{ item }}"
loop:
- alfred
- arc
- fantastical
- firefox-developer-edition
- hammerspoon
- mailmate
- obsidian
- orbstack
- slack
- zoom
- font-source-code-pro
- font-symbols-only-nerd-font
- name: Heed docker-compose caveats
block:
- name: Create Docker CLI plugins config dir
ansible.builtin.file:
dest: ~/.docker/cli-plugins
state: directory
# vim: ft=yaml.ansible
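
The docker-compose caveats block above only creates the CLI-plugins directory in this hunk. The usual second half of that caveat is a symlink into it; a minimal sketch, assuming the default Apple-silicon Homebrew prefix (the source path is an assumption, not taken from this compare):

```
# Hypothetical companion task for the docker-compose caveats block above.
# The source path assumes Homebrew's Apple-silicon prefix (/opt/homebrew).
- name: Symlink docker-compose as a Docker CLI plugin
  ansible.builtin.file:
    src: /opt/homebrew/opt/docker-compose/bin/docker-compose
    dest: ~/.docker/cli-plugins/docker-compose
    state: link
```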

@@ -1,5 +0,0 @@
all:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: "{{ansible_playbook_python}}"

@@ -1,38 +1,79 @@
- hosts: all
- name: Set up macOS
hosts: all
tasks:
- file: path=~/Library/KeyBindings state=directory
- name: Symlink Emacs-style keybindings for OS X
file:
src: ~/.dotfiles/macos/DefaultKeyBinding.dict
dest: ~/Library/KeyBindings/DefaultKeyBinding.dict
state: link
- file: path=~/Library/Colors state=directory
- name: symlink OS X colors palettes
file: src={{ item }} dest=~/Library/Colors/{{ item | basename }} state=link
with_fileglob: ~/.dotfiles/macos/colors/*
- file: path=~/Library/Dictionaries state=directory
- name: Install Webster's 1913 dictionary
copy:
src: ~/.dotfiles/macos/websters-1913.dictionary/
dest: ~/Library/Dictionaries/websters-1913.dictionary
- name: Enable Touch ID for sudo
block:
- lineinfile:
- name: Create ~/Library directories
ansible.builtin.file:
path: ~/Library/{{ item }}
state: directory
mode: '0755'
loop:
- Colors
- Dictionaries
- KeyBindings
- name: Symlink Emacs-style keybindings for OS X
ansible.builtin.file:
src: ~/.dotfiles/macos/DefaultKeyBinding.dict
dest: ~/Library/KeyBindings/DefaultKeyBinding.dict
state: link
- name: Symlink OS X colors palettes
ansible.builtin.file:
src: "{{ item }}"
dest: ~/Library/Colors/{{ item | basename }}
state: link
with_fileglob: ~/.dotfiles/macos/colors/*
- name: Install Webster's 1913 dictionary
ansible.builtin.copy:
src: ~/.dotfiles/macos/websters-1913.dictionary/
dest: ~/Library/Dictionaries/websters-1913.dictionary
mode: '644'
- name: Enable Touch ID for sudo
become: true
ansible.builtin.lineinfile:
path: /etc/pam.d/sudo
insertafter: '^auth\s+sufficient'
regexp: '^auth\s+sufficient\s+pam_tid.so$'
line: "auth\tsufficient\tpam_tid.so"
line: "auth sufficient pam_tid.so"
# tmux
- shell: brew --prefix
register: brew_prefix
- lineinfile:
- name: Enable Touch ID for sudo in tmux
become: true
ansible.builtin.lineinfile:
path: /etc/pam.d/sudo
insertbefore: '^auth\tsufficient\tpam_tid.so'
regexp: '^auth\s+optional\s+.*pam_reattach.so$'
line: "auth\toptional\t{{ brew_prefix.stdout | trim }}/lib/pam/pam_reattach.so"
become: yes
line: "auth optional /opt/homebrew/lib/pam/pam_reattach.so"
# https://github.com/tmux/tmux/issues/2262
- name: Fix tmux-256color terminfo
block:
- name: Create temporary file
ansible.builtin.tempfile:
state: file
register: tempfile
- name: Create terminfo source file
ansible.builtin.copy:
src: macos/tmux-256color
dest: "{{ tempfile.path }}"
mode: '644'
- name: Install terminfo
ansible.builtin.command: /usr/bin/tic -x {{ tempfile.path }}
changed_when: true
- name: Delete temporary file
ansible.builtin.file:
path: "{{ tempfile.path }}"
state: absent
# https://infosec.exchange/@briankrebs/111434555426146154
- name: Change TTL to 65 for fooling tethering detection
become: true
ansible.builtin.command: sysctl -w {{ item }}=65
loop:
- net.inet.ip.ttl
- net.inet6.ip6.hlim
changed_when: true
# vim: ft=yaml.ansible
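
The terminfo block above recompiles the entry on every run. If idempotence is wanted, the tic step could be guarded on the compiled entry it produces; a sketch, assuming tic writes the per-user entry to ~/.terminfo/74/tmux-256color on macOS (that path is an assumption):

```
# Hypothetical idempotent variant of the "Install terminfo" task above.
# The creates path assumes tic's usual per-user output location on macOS.
- name: Install terminfo
  ansible.builtin.command: /usr/bin/tic -x {{ tempfile.path }}
  args:
    creates: ~/.terminfo/74/tmux-256color
```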

@@ -0,0 +1,17 @@
- name: Set up MailMate
hosts: all
tasks:
- name: Create KeyBindings dir
ansible.builtin.file:
path: ~/Library/Application Support/MailMate/Resources/KeyBindings
state: directory
mode: '0755'
- name: Symlink key bindings
ansible.builtin.file:
src: ~/.dotfiles/macos/MailMate.plist
dest: ~/Library/Application Support/MailMate/Resources/KeyBindings/Alpha.plist
state: link
# vim: ft=yaml.ansible

@@ -0,0 +1,65 @@
# Reconstructed via infocmp from file: /usr/share/terminfo/t/tmux-256color
tmux-256color|tmux with 256 colors,
OTbs, OTpt, am, hs, km, mir, msgr, xenl, AX, G0,
colors#256, cols#80, it#8, lines#24, pairs#32767, U8#1,
acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, civis=\E[?25l,
clear=\E[H\E[J, cnorm=\E[34h\E[?25h, cr=^M,
csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
cud=\E[%p1%dB, cud1=^J, cuf=\E[%p1%dC, cuf1=\E[C,
cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\EM,
cvvis=\E[34l, dch=\E[%p1%dP, dch1=\E[P, dim=\E[2m,
dl=\E[%p1%dM, dl1=\E[M, dsl=\E]0;\007, ed=\E[J, el=\E[K,
el1=\E[1K, enacs=\E(B\E)0, flash=\Eg, fsl=^G, home=\E[H,
ht=^I, hts=\EH, ich=\E[%p1%d@, il=\E[%p1%dL, il1=\E[L,
ind=^J, is2=\E)0, kDC=\E[3;2~, kEND=\E[1;2F, kHOM=\E[1;2H,
kIC=\E[2;2~, kLFT=\E[1;2D, kNXT=\E[6;2~, kPRV=\E[5;2~,
kRIT=\E[1;2C, kbs=\177, kcbt=\E[Z, kcub1=\EOD, kcud1=\EOB,
kcuf1=\EOC, kcuu1=\EOA, kdch1=\E[3~, kend=\E[4~, kf1=\EOP,
kf10=\E[21~, kf11=\E[23~, kf12=\E[24~, kf13=\E[1;2P,
kf14=\E[1;2Q, kf15=\E[1;2R, kf16=\E[1;2S, kf17=\E[15;2~,
kf18=\E[17;2~, kf19=\E[18;2~, kf2=\EOQ, kf20=\E[19;2~,
kf21=\E[20;2~, kf22=\E[21;2~, kf23=\E[23;2~,
kf24=\E[24;2~, kf25=\E[1;5P, kf26=\E[1;5Q, kf27=\E[1;5R,
kf28=\E[1;5S, kf29=\E[15;5~, kf3=\EOR, kf30=\E[17;5~,
kf31=\E[18;5~, kf32=\E[19;5~, kf33=\E[20;5~,
kf34=\E[21;5~, kf35=\E[23;5~, kf36=\E[24;5~,
kf37=\E[1;6P, kf38=\E[1;6Q, kf39=\E[1;6R, kf4=\EOS,
kf40=\E[1;6S, kf41=\E[15;6~, kf42=\E[17;6~,
kf43=\E[18;6~, kf44=\E[19;6~, kf45=\E[20;6~,
kf46=\E[21;6~, kf47=\E[23;6~, kf48=\E[24;6~,
kf49=\E[1;3P, kf5=\E[15~, kf50=\E[1;3Q, kf51=\E[1;3R,
kf52=\E[1;3S, kf53=\E[15;3~, kf54=\E[17;3~,
kf55=\E[18;3~, kf56=\E[19;3~, kf57=\E[20;3~,
kf58=\E[21;3~, kf59=\E[23;3~, kf6=\E[17~, kf60=\E[24;3~,
kf61=\E[1;4P, kf62=\E[1;4Q, kf63=\E[1;4R, kf7=\E[18~,
kf8=\E[19~, kf9=\E[20~, khome=\E[1~, kich1=\E[2~,
kind=\E[1;2B, kmous=\E[M, knp=\E[6~, kpp=\E[5~,
kri=\E[1;2A, nel=\EE, op=\E[39;49m, rc=\E8, rev=\E[7m,
ri=\EM, ritm=\E[23m, rmacs=^O, rmcup=\E[?1049l, rmir=\E[4l,
rmkx=\E[?1l\E>, rmso=\E[27m, rmul=\E[24m,
rs2=\Ec\E[?1000l\E[?25h, sc=\E7,
setab=\E[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m,
setaf=\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m,
sgr=\E[0%?%p6%t;1%;%?%p1%t;3%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p5%t;2%;m%?%p9%t\016%e\017%;,
sgr0=\E[m\017, sitm=\E[3m, smacs=^N, smcup=\E[?1049h,
smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m, smul=\E[4m,
tbc=\E[3g, tsl=\E]0;, Cr=\E]112\007, Cs=\E]12;%p1%s\007,
E0=\E(B, Ms=\E]52;%p1%s;%p2%s\007, S0=\E(%p1%c,
Se=\E[2 q, Ss=\E[%p1%d q, TS=\E]0;, kDC3=\E[3;3~,
kDC4=\E[3;4~, kDC5=\E[3;5~, kDC6=\E[3;6~, kDC7=\E[3;7~,
kDN=\E[1;2B, kDN3=\E[1;3B, kDN4=\E[1;4B, kDN5=\E[1;5B,
kDN6=\E[1;6B, kDN7=\E[1;7B, kEND3=\E[1;3F, kEND4=\E[1;4F,
kEND5=\E[1;5F, kEND6=\E[1;6F, kEND7=\E[1;7F,
kHOM3=\E[1;3H, kHOM4=\E[1;4H, kHOM5=\E[1;5H,
kHOM6=\E[1;6H, kHOM7=\E[1;7H, kIC3=\E[2;3~, kIC4=\E[2;4~,
kIC5=\E[2;5~, kIC6=\E[2;6~, kIC7=\E[2;7~, kLFT3=\E[1;3D,
kLFT4=\E[1;4D, kLFT5=\E[1;5D, kLFT6=\E[1;6D,
kLFT7=\E[1;7D, kNXT3=\E[6;3~, kNXT4=\E[6;4~,
kNXT5=\E[6;5~, kNXT6=\E[6;6~, kNXT7=\E[6;7~,
kPRV3=\E[5;3~, kPRV4=\E[5;4~, kPRV5=\E[5;5~,
kPRV6=\E[5;6~, kPRV7=\E[5;7~, kRIT3=\E[1;3C,
kRIT4=\E[1;4C, kRIT5=\E[1;5C, kRIT6=\E[1;6C,
kRIT7=\E[1;7C, kUP=\E[1;2A, kUP3=\E[1;3A, kUP4=\E[1;4A,
kUP5=\E[1;5A, kUP6=\E[1;6A, kUP7=\E[1;7A, rmxx=\E[29m,
smxx=\E[9m,

@@ -1,37 +1,53 @@
- hosts: all
- name: Main playbook
hosts: dev
tasks:
- group_by: key=os_{{ ansible_distribution }}
tags: always
- name: Group by OS
ansible.builtin.group_by:
key: os_{{ ansible_distribution }}
- ansible.builtin.git:
repo: git@git.kejadlen.dev:alpha/dotfiles.git
dest: ~/.dotfiles.git
bare: true
- name: Check out dotfiles
ansible.builtin.git:
repo: git@git.kejadlen.dev:alpha/dotfiles.git
dest: ~/.dotfiles
version: main
accept_newhostkey: true
# repo can have local changes
ignore_errors: true # noqa: ignore-errors
# macOS things
- name: Import dotfiles playbook
import_playbook: dotfiles.yml
- name: Import terminal profile
ansible.builtin.command: open ~/.macos/Alpha.terminal
# First since this installs tooling used later
- name: Import homebrew playbook
import_playbook: homebrew.yml
- name: Set terminal profile to be the default
osx_defaults:
domain: com.apple.Terminal
key: "{{ item }} Window Settings"
type: string
value: Alpha
with_items:
- Default
- Startup
- name: Import defaults playbook
import_playbook: defaults.yml
- import_playbook: homebrew.yml
- name: Import dock playbook
import_playbook: dock.yml
- import_playbook: defaults.yml
- import_playbook: dock.yml
- import_playbook: macos.yml
- name: Import macos playbook
import_playbook: macos.yml
- hosts: all
- name: Import ubuntu playbook
import_playbook: ubuntu.yml
- name: Misc macOS things
hosts: os_MacOSX
tasks:
- ansible.builtin.command: "luarocks install fennel"
# https://tratt.net/laurie/blog/2024/faster_shell_startup_with_shell_switching.html
# I'm not sure why this doesn't work on my Linux box, but whatever
- name: Set default shell to sh
ansible.builtin.user:
name: alpha
shell: /bin/sh --login
- name: Install Fennel
ansible.builtin.command: "luarocks install fennel"
args:
creates: /opt/homebrew/bin/fennel
# vim: ft=yaml.ansible

@@ -0,0 +1,58 @@
- name: Set up Ubuntu
hosts: os_Ubuntu
tasks:
- name: Add myself to sudoers
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
regexp: '^alpha ALL='
line: 'alpha ALL=(ALL) NOPASSWD: ALL'
validate: /usr/sbin/visudo -cf %s
- name: Install git
become: true
block:
- name: Add PPA
ansible.builtin.apt_repository:
repo: "ppa:git-core/ppa"
state: present
- name: Install git # noqa: package-latest
ansible.builtin.apt:
name: git
update_cache: true
state: latest
- name: Install git-lfs
become: true
block:
# https://packagecloud.io/github/git-lfs/install#manual-deb
- name: Install dependencies
ansible.builtin.apt:
name:
- debian-archive-keyring
- curl
- gnupg
- apt-transport-https
update_cache: true
- name: Add the GPG key # noqa: command-instead-of-module risky-shell-pipe
ansible.builtin.shell: >
curl -fsSL https://packagecloud.io/github/git-lfs/gpgkey
| gpg --dearmor
> /etc/apt/keyrings/github_git-lfs-archive-keyring.gpg
args:
creates: /etc/apt/keyrings/github_git-lfs-archive-keyring.gpg
- name: Add apt repo
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/github_git-lfs.list
mode: '0644'
content: |
deb [signed-by=/etc/apt/keyrings/github_git-lfs-archive-keyring.gpg] https://packagecloud.io/github/git-lfs/ubuntu jammy main
deb-src [signed-by=/etc/apt/keyrings/github_git-lfs-archive-keyring.gpg] https://packagecloud.io/github/git-lfs/ubuntu jammy main
- name: Install git-lfs
ansible.builtin.apt:
name: git-lfs
update_cache: true
# vim: ft=yaml.ansible

@@ -0,0 +1,83 @@
# https://www.fastmail.help/hc/en-us/articles/1500000280261-Setting-up-your-domain-MX-only#domain-registration
#
terraform {
required_providers {
linode = {
source = "linode/linode"
version = "2.18.0"
}
}
}
variable "domain" {
type = object({
id = string,
domain = string,
})
nullable = false
}
resource "linode_domain_record" "fastmail_mx" {
domain_id = var.domain.id
for_each = {
"in1-smtp.messagingengine.com" = 10
"in2-smtp.messagingengine.com" = 20
}
record_type = "MX"
priority = each.value
target = each.key
}
resource "linode_domain_record" "fastmail_spf" {
domain_id = var.domain.id
name = var.domain.domain
record_type = "TXT"
target = "v=spf1 include:spf.messagingengine.com ?all"
}
resource "linode_domain_record" "fastmail_dkim" {
domain_id = var.domain.id
for_each = {
"mesmtp._domainkey" = "mesmtp.${var.domain.domain}.dkim.fmhosted.com"
"fm1._domainkey" = "fm1.${var.domain.domain}.dkim.fmhosted.com"
"fm2._domainkey" = "fm2.${var.domain.domain}.dkim.fmhosted.com"
"fm3._domainkey" = "fm3.${var.domain.domain}.dkim.fmhosted.com"
}
name = each.key
record_type = "CNAME"
target = each.value
}
resource "linode_domain_record" "fastmail_dmarc" {
domain_id = var.domain.id
name = "_dmarc.${var.domain.domain}"
record_type = "TXT"
target = "v=DMARC1; p=none;"
}
resource "linode_domain_record" "fastmail_srv" {
domain_id = var.domain.id
for_each = {
"submission" = { priority = 0, weight = 1, port = 587, target = "smtp.fastmail.com" }
"imap" = { priority = 0, weight = 0, port = 0, target = "." }
"imaps" = { priority = 0, weight = 1, port = 993, target = "imap.fastmail.com" }
"pop3" = { priority = 0, weight = 0, port = 0, target = "." }
"pop3s" = { priority = 10, weight = 1, port = 995, target = "pop.fastmail.com" }
"jmap" = { priority = 0, weight = 1, port = 443, target = "api.fastmail.com" }
}
service = each.key
record_type = "SRV"
priority = each.value.priority
protocol = "tcp"
weight = each.value.weight
port = each.value.port
target = each.value.target
}

@@ -0,0 +1,13 @@
all:
hosts:
localhost:
ansible_connection: local
ansible_python_interpreter: "{{ansible_playbook_python}}"
ramble-hard:
ansible_user: root
ansible_python_interpreter: /usr/bin/python3
on-fire-within:
lotus-land-story:
dev:
hosts:
localhost:

@@ -0,0 +1,2 @@
terraform.tfvars
vars.yml

@@ -0,0 +1,44 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/http" {
version = "3.2.1"
constraints = "3.2.1"
hashes = [
"h1:Q2YQZzEhHQVlkQCQVpMzFVs0Gg+eXzISbOwaOYqpflc=",
"zh:088b3b3128034485e11dff8da16e857d316fbefeaaf5bef24cceda34c6980641",
"zh:09ed1f2462ea4590b112e048c4af556f0b6eafc7cf2c75bb2ac21cd87ca59377",
"zh:39c6b0b4d3f0f65e783c467d3f634e2394820b8aef907fcc24493f21dcf73ca3",
"zh:47aab45327daecd33158a36c1a36004180a518bf1620cdd5cfc5e1fe77d5a86f",
"zh:4d70a990aa48116ab6f194eef393082c21cf58bece933b63575c63c1d2b66818",
"zh:65470c43fda950c7e9ac89417303c470146de984201fff6ef84299ea29e02d30",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:842b4dd63e438f5cd5fdfba1c09b8fdf268e8766e6690988ee24e8b25bfd9e8d",
"zh:a167a057f7e2d80c78d4b4057538588131fceb983d5c93b07675ad9eb1aa5790",
"zh:d0ba69b62b6db788cfe3cf8f7dc6e9a0eabe2927dc119d7fe3fe6573ee559e66",
"zh:e28d24c1d5ff24b1d1cc6f0074a1f41a6974f473f4ff7a37e55c7b6dca68308a",
"zh:fde8a50554960e5366fd0e1ca330a7c1d24ae6bbb2888137a5c83d83ce14fd18",
]
}
provider "registry.terraform.io/linode/linode" {
version = "1.30.0"
constraints = "1.30.0"
hashes = [
"h1:d03YFL0XRD3H1SNwxX4tud+xw3j0HERNK81QNpD6e7g=",
"zh:197c61c5eb2252f65c18d2aa65cdc0511617b13e2388118f3fe063d7969dd7ad",
"zh:1a66470682acb13dc57308d5b1eaa19ff60c2404a3b15714e3072d02d569b1a5",
"zh:368cdcf17073a39687da830c02cf3ce50e0d8f03b7ec808b49561628be798abc",
"zh:42f2510a70afbb7fc8928df119d1e14ce1b61d2aded13b88072858ee5861feb2",
"zh:57734dd1e8255abd52a33ff79c20ef4efc3831850b22dd1a628e6301c3cf95c6",
"zh:61d614a7a4607bfc4ab6bfd0501007501957b973dbd028e0e513a3d4df07f12e",
"zh:79243f22fc0a9adfc1123abdd17c515f0ce4d8147302889033b6c44f6a48337e",
"zh:9f7cd46185bbe2c001dab1d0bd6c17a9740e7279d3fffe93755f2c964e267213",
"zh:9fdc9f8f47bde4140bc14cf082bbc2ceb63a3bebf0683df2fefd83c9e248274c",
"zh:aa1fd80a7ea245f8b852e40c68ccde2d8b6446e2138ebdec7425c67e82099881",
"zh:bb31f1ba5b0e001cf343d3a4cfafa70e6f3e30fd8a200d2cd7e077663efe0456",
"zh:da87881fa030287df2009028c49581e1fd0ff89baef0d8543b27ca506eff2971",
"zh:ed6afd7b1bc7237a9dff5c721ca3a5c7c505803cd5ea0b4ad0dfdf07ed6f9b0d",
"zh:ee653d5d08cb331ce2d8dc1010e68d363470ae87be62c0515e5d2418727cd02b",
]
}

@@ -0,0 +1,22 @@
# Lotus Land Story
```
# setup
export LOTUS_LAND_STORY_DOMAIN=...
# terraform
rake terraform
# add the IP to ~/.ssh/config
# make sure we can hit it
ansible all -m ping
# ansible
rake ansible
# manually connect to tailscale
# specific playbooks
rake ansible[playbook]
```

@@ -0,0 +1,60 @@
require "open3"
require "yaml"
DOMAIN = ENV.fetch("LOTUS_LAND_STORY_DOMAIN")
task bootstrap: "pip:sync"
namespace :pip do
task :sync => "requirements.txt" do
sh "pip-sync requirements.txt"
end
desc ""
task :upgrade do
out, _ = Open3.capture2e("pip-compile --upgrade --resolver=backtracking requirements.in")
File.write("requirements.txt", out)
end
file "requirements.txt" => "requirements.in" do |t|
out, _ = Open3.capture2e("pip-compile --resolver=backtracking requirements.in")
File.write(t.name, out)
end
end
desc ""
task terraform: "terraform.tfvars" do
sh "terraform apply"
end
desc ""
task :ansible, %i[ playbook ] => "vars.yml" do |_, args|
playbook = args.fetch(:playbook, "main")
sh "ansible-playbook #{playbook}.yml"
end
task "terraform.tfvars" do |t|
File.write(t.name, "domain = \"#{DOMAIN}\"")
end
task "vars.yml" do |t|
linode_volume = `terraform output -raw lotus_land_story_volume`
miniflux_db_password = `op read "op://Private/Miniflux/db password"`.strip
miniflux_password = `op read op://Private/Miniflux/password`.strip
prometheus_linode_api_token = `op read "op://Private/lotus-land-story/prometheus/linode api token"`.strip
File.write(t.name, YAML.dump({
"domain" => DOMAIN,
"linode_volume" => linode_volume,
"prometheus" => {
"linode_api_token" => prometheus_linode_api_token,
},
"miniflux" => {
"db_password" => miniflux_db_password,
"password" => miniflux_password,
},
}))
end
task default: %i[ terraform ansible ]

@@ -0,0 +1,2 @@
[defaults]
inventory=hosts.yml

@@ -0,0 +1,50 @@
- name: Set up Authelia
hosts: lotus-land-story
vars:
version: 4.38.10
vars_files:
- vars.yml
tasks:
- name: Create directories for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/authelia/{{ item }}
state: directory
mode: "0700"
loop:
- config
- secrets
- name: Copy configuration
ansible.builtin.template:
src: templates/authelia_{{ item }}.yml
dest: /mnt/lotus-land-story/authelia/config/{{ item }}.yml
mode: "0644"
loop:
- configuration
- users_database
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
- name: Run Authelia
community.docker.docker_container:
restart: true
name: authelia
image: docker.io/authelia/authelia:{{ version }}
env:
AUTHENTICATION_GUARD: remote_user_guard
volumes:
- /mnt/lotus-land-story/authelia/config:/config
- /mnt/lotus-land-story/authelia/secrets:/secrets
restart_policy: unless-stopped
networks:
- name: lotus_land_story
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@@ -0,0 +1,61 @@
- name: Set up Caddy
hosts: lotus-land-story
vars_files:
- vars.yml
vars:
version: 2.8.4
tasks:
- name: Make /mnt/lotus-land-story/caddy
ansible.builtin.file:
path: /mnt/lotus-land-story/{{ item }}
state: directory
mode: "0755"
loop:
- caddy
- caddy/data
- name: Get docker network for trusted proxies
community.docker.docker_network:
name: lotus_land_story
register: docker_network
# TODO Reload Caddy when this changes:
# docker exec -w /etc/caddy $caddy_container_id caddy reload
- name: Set up Caddyfile
ansible.builtin.template:
src: templates/Caddyfile
dest: /mnt/lotus-land-story/caddy/Caddyfile
mode: "0644"
vars:
trusted_proxies: "{{ docker_network.network.IPAM.Config[0].Subnet }}"
- name: Create Caddy volume
community.docker.docker_volume:
name: caddy
- name: Run Caddy
community.docker.docker_container:
name: caddy
image: caddy:{{ version }}
restart: true
ports:
- "80:80"
- "443:443"
- "443:443/udp"
- "2019:2019"
volumes:
- /mnt/lotus-land-story/caddy/Caddyfile:/etc/caddy/Caddyfile
- /mnt/lotus-land-story/caddy/data:/data
- caddy-config:/config
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible
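
The reload TODO above maps naturally onto a handler that the Caddyfile task could notify; a rough sketch mirroring the docker exec command in that comment (availability of community.docker.docker_container_exec in the installed collection version is an assumption):

```
# Hypothetical handler for the Caddyfile reload TODO above; the Caddyfile task
# would add "notify: Reload caddy" to trigger it.
- name: Reload caddy
  community.docker.docker_container_exec:
    container: caddy
    chdir: /etc/caddy
    command: caddy reload
```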

@@ -0,0 +1,43 @@
- name: Set up Calibre-web
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create directories for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/calibre-web/{{ item }}
state: directory
mode: "0755"
loop:
- books
- config
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
# https://github.com/linuxserver/docker-calibre-web#docker-compose-recommended-click-here-for-more-info
- name: Run Calibre-web
community.docker.docker_container:
name: calibre-web
image: lscr.io/linuxserver/calibre-web:latest
restart: true
env:
PUID: "0"
PGID: "0"
TZ: Etc/UTC
DOCKER_MODS: linuxserver/mods:universal-calibre
volumes:
- /mnt/lotus-land-story/calibre-web/books:/books
- /mnt/lotus-land-story/calibre-web/config:/config
restart_policy: unless-stopped
networks:
- name: lotus_land_story
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@@ -0,0 +1,87 @@
# https://docs.docker.com/engine/install/debian/#install-using-the-repository
- name: Set up Docker
hosts: lotus-land-story
tasks:
- name: Install Docker requirements
ansible.builtin.apt:
pkg:
- ca-certificates
- curl
- gnupg
state: present
- name: Make /etc/apt/keyrings
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: "0755"
- name: Download Docker GPG key
ansible.builtin.shell: |
set -o pipefail
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
args:
creates: /etc/apt/keyrings/docker.gpg
- name: Get architecture
ansible.builtin.command: dpkg --print-architecture
register: arch
changed_when: arch.rc != 0
- name: Set up Docker repository
ansible.builtin.template:
src: templates/docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Install Docker
ansible.builtin.apt:
pkg:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
update_cache: true
- name: Create Docker volume location
ansible.builtin.file:
path: /mnt/lotus-land-story/docker
state: directory
mode: "0755"
- name: Get docker0 IP address
ansible.builtin.shell: ip -4 -o addr show docker0 | awk '{print $4}' # noqa: risky-shell-pipe
vars:
executable: /usr/bin/bash
register: docker_ip
changed_when: docker_ip.rc != 0
- name: Save docker_ip fact
ansible.builtin.set_fact:
docker_ip:
cidr: "{{ docker_ip.stdout }}"
address: "{{ docker_ip.stdout | ansible.utils.ipaddr('address') }}"
- name: Configure Docker daemon
ansible.builtin.template:
src: templates/daemon.json
dest: /etc/docker/daemon.json
mode: "0644"
notify: Restart docker
- name: Create docker network
community.docker.docker_network:
name: lotus_land_story
- name: Create docker network
community.docker.docker_network:
name: lotus-land-story
register: docker_network
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible
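
templates/daemon.json is referenced above but not included in this compare; given the "cap docker logs" commit in the list, it plausibly caps the json-file log driver. An illustrative inline equivalent of that task (the rotation limits are assumptions):

```
# Hypothetical inline stand-in for the daemon.json template referenced above;
# the json-file rotation limits are illustrative, not taken from the repo.
- name: Configure Docker daemon
  ansible.builtin.copy:
    dest: /etc/docker/daemon.json
    mode: "0644"
    content: |
      {
        "log-driver": "json-file",
        "log-opts": {
          "max-size": "10m",
          "max-file": "3"
        }
      }
  notify: Restart docker
```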

@@ -0,0 +1,67 @@
# https://docs.firefly-iii.org/firefly-iii/installation/docker/#straight-from-docker-hub
- name: Set up Firefly III
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create directories for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/firefly-iii/{{ item }}
state: directory
mode: "0755"
loop:
- upload
- database
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
- name: Run Firefly III
community.docker.docker_container:
name: firefly-iii
image: fireflyiii/core:version-6.0.22
state: absent
restart: true
env:
APP_KEY: "{{ firefly_iii.app_key }}"
APP_URL: https://{{ firefly_iii.subdomain }}.{{ domain }}
TRUSTED_PROXIES: "**" # TODO Set this to caddy?
DB_CONNECTION: sqlite
STATIC_CRON_TOKEN: "{{ firefly_iii.static_cron_token }}"
MAIL_MAILER: smtp
MAIL_HOST: smtp.sendgrid.net
MAIL_PORT: "465"
MAIL_FROM: money@{{ domain }}
MAIL_USERNAME: apikey
MAIL_PASSWORD: "{{ firefly_iii.mail_password }}"
MAIL_ENCRYPTION: "true"
volumes:
- /mnt/lotus-land-story/firefly-iii/upload:/var/www/html/storage/upload
- /mnt/lotus-land-story/firefly-iii/database:/var/www/html/storage/database
restart_policy: unless-stopped
networks:
- name: lotus_land_story
- name: Run Firefly III cron trigger
community.docker.docker_container:
name: firefly-iii-cron
image: alpine
restart: true
state: absent
command: >
sh -c
"echo \"0 3 * * * wget -qO- http://firefly-iii:8080/api/v1/cron/{{ firefly_iii.static_cron_token }}\"
| crontab - && crond -f -L /dev/stdout"
restart_policy: unless-stopped
networks:
- name: lotus_land_story
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@@ -0,0 +1,31 @@
- name: Set up Golink
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Make /mnt/lotus-land-story/golink
ansible.builtin.file:
path: /mnt/lotus-land-story/golink
state: directory
mode: "0755"
owner: 65532
group: 65532
- name: Run Golink
community.docker.docker_container:
# recreate: true
# restart: true
name: golink
image: ghcr.io/tailscale/golink:main
env:
TS_AUTHKEY: "{{ golink.auth_key }}"
volumes:
- /mnt/lotus-land-story/golink:/home/nonroot
restart_policy: unless-stopped
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@@ -0,0 +1,84 @@
- name: Set up Grafana
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create Grafana dir
ansible.builtin.file:
path: /mnt/lotus-land-story/grafana/provisioning/{{ item }}
state: directory
mode: "0755"
loop:
- datasources
- name: Configure Grafana
ansible.builtin.copy:
dest: /mnt/lotus-land-story/grafana/grafana.ini
content: |
[metrics]
enabled = true
disable_total_stats = false
[server]
domain = grafana.{{ domain }}
http_addr = 0.0.0.0
root_url = https://grafana.{{ domain }}
[auth.generic_oauth]
enabled = true
name = Authelia
icon = signin
client_id = grafana
client_secret = {{ grafana.oauth_secret }}
scopes = openid profile email groups
empty_scopes = false
auth_url = https://auth.{{ domain }}/api/oidc/authorization
token_url = https://auth.{{ domain }}/api/oidc/token
api_url = https://auth.{{ domain }}/api/oidc/userinfo
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
use_pkce = true
mode: "0644"
- name: Provision Prometheus
ansible.builtin.copy:
dest: /mnt/lotus-land-story/grafana/provisioning/datasources/prometheus.yml
content: |
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
# Access mode - proxy (server in the UI) or direct (browser in the UI).
access: proxy
url: http://prometheus:9090
jsonData:
httpMethod: POST
manageAlerts: true
prometheusType: Prometheus
prometheusVersion: 2.37.0
mode: "0644"
- name: Create Grafana volume
community.docker.docker_volume:
name: grafana
- name: Run Grafana
community.docker.docker_container:
# recreate: true
# restart: true
name: grafana
image: grafana/grafana-oss:9.2.15
# ports:
# - "3000:3000"
volumes:
- /mnt/lotus-land-story/grafana/grafana.ini:/etc/grafana/grafana.ini
- /mnt/lotus-land-story/grafana/provisioning:/etc/grafana/provisioning
- grafana:/var/lib/grafana
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
# vim: ft=yaml.ansible
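
The [auth.generic_oauth] settings above are the Grafana half of the Authelia integration; the matching client registration lives in templates/authelia_configuration.yml, which is not part of this compare. A rough sketch of what that stanza looks like in Authelia 4.38 terms, with the redirect URI derived from root_url above (field names and the hashed-secret placeholder are assumptions):

```
# Hypothetical Authelia OIDC client matching the grafana.ini above; this is a
# sketch, not the contents of the repo's authelia_configuration.yml template.
identity_providers:
  oidc:
    clients:
      - client_id: grafana
        client_secret: "$pbkdf2-sha512$..."  # hashed digest of the secret Grafana sends
        redirect_uris:
          - https://grafana.{{ domain }}/login/generic_oauth
        scopes:
          - openid
          - profile
          - email
          - groups
```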

@@ -0,0 +1,33 @@
- name: Set up hledger
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create directory for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/hledger
state: directory
mode: "0755"
- name: Run hledger
community.docker.docker_container:
state: absent
restart: true
name: hledger
image: dastapov/hledger:1.31
env:
HLEDGER_JOURNAL_FILE: /data/all.journal
HLEDGER_BASE_URL: https://{{ hledger.subdomain }}.{{ domain }}
HLEDGER_ARGS: --capabilities=view,add,manage
volumes:
- /mnt/lotus-land-story/hledger:/data
restart_policy: unless-stopped
networks:
- name: lotus_land_story
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@@ -0,0 +1,3 @@
all:
hosts:
lotus-land-story:

@@ -0,0 +1,131 @@
# https://www.parseable.com/docs/log-ingestion/agents/vector
# https://vector.dev/docs/setup/installation/platforms/docker/
- name: Set up Parseable
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create directories for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/parseable/{{ item }}
state: directory
mode: "0755"
loop:
- data
- staging
- name: Configure Vector
ansible.builtin.copy:
dest: /mnt/lotus-land-story/parseable/vector.yml
content: |
sources:
# vector_metrics:
# type: internal_metrics
raw_docker_logs:
type: docker_logs
transforms:
docker_logs:
type: remap
inputs:
- raw_docker_logs
source: |
if includes(["authelia", "caddy", "miniflux"], .container_name) {
. |= object!(parse_json!(.message))
} else if .container_name == "paperless" {
# asctime has trailing milliseconds, which I can't figure out
# how to parse, but I also don't care about it, so drop it
parsed = parse_regex!(
.message,
r'\[(?P<asctime>.*?),\d*\] \[(?P<level>.*?)\] \[(?P<name>.*?)\] (?P<message>.*)',
)
.paperless_time = parse_timestamp!(del(parsed.asctime), format: "%F %T")
. |= parsed
} else if includes(["grafana", "loki"], .container_name) {
. |= parse_key_value!(.message)
}
sinks:
# console:
# type: console
# inputs:
# - demo_logs
# encoding:
# codec: json
parseable:
type: http
method: post
batch:
max_bytes: 10485760
max_events: 1000
timeout_secs: 10
compression: gzip
inputs:
- docker_logs
encoding:
codec: json
uri: http://parseable:8000/api/v1/ingest
auth:
strategy: basic
user: alpha
password: {{ parseable.password }}
request:
headers:
X-P-Stream: vector
healthcheck:
enabled: true
path: http://parseable/api/v1/liveness
port: 8000
# prometheus:
# type: prometheus_remote_write
# endpoint: http://prometheus:9090
# inputs:
# - vector_metrics
mode: "0600"
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
# https://www.parseable.com/logstash/docker-compose.yaml
- name: Run Parseable
community.docker.docker_container:
restart: true
name: parseable
image: parseable/parseable:v0.7.3
command:
- parseable
- local-store
env:
P_FS_DIR: /parseable/data
P_STAGING_DIR: /parseable/staging
P_USERNAME: alpha
P_PASSWORD: "{{ parseable.password }}"
P_OIDC_CLIENT_ID: parseable
P_OIDC_CLIENT_SECRET: "{{ parseable.oidc_secret }}"
P_OIDC_ISSUER: https://auth.{{ domain }}
P_ORIGIN_URI: https://logs.{{ domain }}
# RUST_LOG: warning
volumes:
- ./data:/parseable/data
- ./staging:/parseable/staging
restart_policy: unless-stopped
networks:
- name: lotus_land_story
- name: Run Vector
community.docker.docker_container:
restart: true
name: vector
image: timberio/vector:0.35.0-alpine
env:
# VECTOR_LOG: debug
volumes:
- /mnt/lotus-land-story/parseable/vector.yml:/etc/vector/vector.yaml
- /var/run/docker.sock:/var/run/docker.sock # for docker_logs
restart_policy: unless-stopped
networks:
- name: lotus_land_story
# vim: ft=yaml.ansible

@@ -0,0 +1,184 @@
# https://raw.githubusercontent.com/grafana/loki/v2.8.0/production/docker-compose.yaml
# https://grafana.com/docs/loki/latest/clients/docker-driver/
- name: Set up Loki
hosts: lotus-land-story
tasks:
- name: Provision Grafana
ansible.builtin.copy:
dest: /mnt/lotus-land-story/grafana/provisioning/datasources/loki.yml
content: |
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: http://loki:3100
mode: "0644"
- name: Create Loki config dirs
ansible.builtin.file:
path: /mnt/lotus-land-story/loki/{{ item }}
state: directory
owner: 10001
group: 10001
mode: "0755"
loop:
- config
- data
- name: Configure Loki
ansible.builtin.copy:
dest: /mnt/lotus-land-story/loki/config/loki.yml
content: |
auth_enabled: false
server:
http_listen_port: 3100
common:
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
compactor:
retention_enabled: true
limits_config:
retention_period: 168h
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
# https://grafana.com/docs/loki/latest/operations/storage/tsdb/
- from: 2023-07-17
index:
period: 24h
prefix: index_
object_store: filesystem
schema: v12
store: tsdb
storage_config:
filesystem:
directory: /data
ruler:
alertmanager_url: http://localhost:9093
owner: 10001
group: 10001
mode: "0644"
# https://github.com/grafana/loki/issues/2361
- name: Configure Promtail
ansible.builtin.copy:
dest: /mnt/lotus-land-story/loki/config/promtail.yml
content: |
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://loki:3100/loki/api/v1/push
scrape_configs:
- job_name: system
static_configs:
- targets:
- localhost
labels:
job: varlogs
__path__: /var/log/*.log
__path_exclude__: /var/log/syslog
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
refresh_interval: 5s
relabel_configs:
- source_labels: ['__meta_docker_container_name']
regex: '/(.*)'
target_label: 'container'
- job_name: syslog
syslog:
listen_address: 0.0.0.0:514
listen_protocol: tcp
idle_timeout: 60s
label_structured_data: yes
labels:
job: syslog
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: 'host'
- source_labels: ['__syslog_message_severity']
target_label: level
- source_labels: ['__syslog_message_facility']
target_label: syslog_facility
- source_labels: ['__syslog_message_app_name']
target_label: syslog_identifier
mode: "0644"
- name: Run Loki
community.docker.docker_container:
name: loki
image: grafana/loki:2.8.0
restart: true
command: -config.file=/mnt/config/loki.yml
ports:
- "3100:3100"
volumes:
- /mnt/lotus-land-story/loki/config:/mnt/config
- /mnt/lotus-land-story/loki/data:/data
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
- name: Run Promtail
community.docker.docker_container:
name: promtail
image: grafana/promtail:2.8.0
restart: true
command: -config.file=/mnt/config/promtail.yml
ports:
- "514:514"
volumes:
- /var/log:/var/log
- /var/run/docker.sock:/var/run/docker.sock
- /mnt/lotus-land-story/loki/config:/mnt/config
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
- name: Ship to promtail using rsyslog
ansible.builtin.copy:
content: |
*.* action(type="omfwd"
protocol="tcp"
target="127.0.0.1"
port="514"
Template="RSYSLOG_SyslogProtocol23Format"
TCP_Framing="octet-counted"
KeepAlive="on")
dest: /etc/rsyslog.d/50-promtail.conf
mode: "0644"
notify: Restart rsyslog
handlers:
- name: Restart rsyslog
ansible.builtin.service:
name: rsyslog
state: restarted
# vim: ft=yaml.ansible

@@ -0,0 +1,86 @@
terraform {
required_providers {
linode = {
source = "linode/linode"
version = "1.30.0"
}
http = {
source = "hashicorp/http"
version = "3.2.1"
}
}
}
variable "domain" {
type = string
nullable = false
}
data "http" "github_keys" {
url = "https://github.com/kejadlen.keys"
}
resource "linode_instance" "lotus_land_story" {
label = "lotus-land-story"
image = "linode/debian11"
region = "us-west"
type = "g6-nanode-1"
authorized_keys = split("\n", chomp(data.http.github_keys.response_body))
backups_enabled = true
}
resource "linode_volume" "lotus_land_story" {
label = "lotus-land-story"
region = "us-west"
linode_id = linode_instance.lotus_land_story.id
size = 10
connection {
host = resource.linode_instance.lotus_land_story.ip_address
}
provisioner "remote-exec" {
inline = [
"mkfs.ext4 '${self.filesystem_path}'",
"mkdir '/mnt/lotus-land-story'",
"mount '${self.filesystem_path}' '/mnt/lotus-land-story'",
]
}
}
data "linode_domain" "domain" {
domain = var.domain
}
resource "linode_domain_record" "subdomains" {
domain_id = data.linode_domain.domain.id
record_type = "A"
target = resource.linode_instance.lotus_land_story.ip_address
for_each = toset([
"auth",
"books",
"grafana",
"hledger",
"loki",
"prometheus",
"rss",
])
name = each.key
}
resource "linode_domain_record" "prometheus" {
domain_id = data.linode_domain.domain.id
name = "prometheus"
record_type = "A"
target = resource.linode_instance.lotus_land_story.ip_address
}
output "lotus_land_story_ip" {
value = resource.linode_instance.lotus_land_story.ip_address
}
output "lotus_land_story_volume" {
value = resource.linode_volume.lotus_land_story.filesystem_path
}

@@ -0,0 +1,125 @@
- name: Base setup for lotus-land-story
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Always mount the lotus-land-story volume
ansible.builtin.lineinfile:
dest: /etc/fstab
line: "{{ linode_volume }} /mnt/lotus-land-story ext4 defaults,noatime,nofail 0 2"
state: present
- name: Set hostname
ansible.builtin.hostname:
name: lotus-land-story
- name: Install ansible requirements
ansible.builtin.apt:
pkg:
- docker-compose
- libpq-dev
- python3-docker
- python3-psycopg2
state: present
- import_playbook: ../playbooks/tailscale.yml # noqa: name[play]
- import_playbook: docker.yml # noqa: name[play]
- import_playbook: postgres.yml # noqa: name[play]
- import_playbook: redis.yml # noqa: name[play]
- name: Listen on the docker interface
hosts: lotus-land-story
tasks:
- name: Set up postgres to listen on docker0 interface
ansible.builtin.lineinfile:
dest: /etc/postgresql/13/main/conf.d/listen.conf
regexp: '^#?listen_addresses='
line: "listen_addresses='localhost,{{ docker_ip.address }}'"
state: present
create: true
mode: "0644"
notify: Restart postgres
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
- import_playbook: prometheus.yml # noqa: name[play]
# Maybe this should be in the prometheus playbook?
- name: Set up prometheus user in postgres
hosts: lotus-land-story
become: true
become_user: postgres
tasks:
- name: Get postgres roles
community.postgresql.postgresql_info:
filter: roles
register: postgres_info
- name: Add postgres permissions for postgres-exporter
community.postgresql.postgresql_query:
query: |
CREATE USER prometheus;
ALTER USER prometheus SET SEARCH_PATH TO prometheus,pg_catalog;
CREATE SCHEMA prometheus AUTHORIZATION prometheus;
CREATE FUNCTION prometheus.f_select_pg_stat_activity()
RETURNS setof pg_catalog.pg_stat_activity
LANGUAGE sql
SECURITY DEFINER
AS $$
SELECT * from pg_catalog.pg_stat_activity;
$$;
CREATE FUNCTION prometheus.f_select_pg_stat_replication()
RETURNS setof pg_catalog.pg_stat_replication
LANGUAGE sql
SECURITY DEFINER
AS $$
SELECT * from pg_catalog.pg_stat_replication;
$$;
CREATE VIEW prometheus.pg_stat_replication
AS
SELECT * FROM prometheus.f_select_pg_stat_replication();
CREATE VIEW prometheus.pg_stat_activity
AS
SELECT * FROM prometheus.f_select_pg_stat_activity();
GRANT SELECT ON prometheus.pg_stat_replication TO prometheus;
GRANT SELECT ON prometheus.pg_stat_activity TO prometheus;
when: "'prometheus' not in postgres_info.roles"
- import_playbook: golink.yml # noqa: name[play]
- import_playbook: grafana.yml # noqa: name[play]
- import_playbook: loki.yml # noqa: name[play]
- import_playbook: miniflux.yml # noqa: name[play]
- import_playbook: caddy.yml # noqa: name[play]
- name: Set up ufw
hosts: lotus-land-story
tasks:
- name: Get docker network for ufw
community.docker.docker_network:
name: lotus_land_story
register: docker_network
- name: Allow access from docker network
community.general.ufw:
rule: allow
from_ip: "{{ docker_network.network.IPAM.Config[0].Subnet }}"
notify: Reload ufw
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,88 @@
# https://miniflux.app/docs/installation.html#docker
- name: Set up the Miniflux db
hosts: lotus-land-story
become: true
become_user: postgres
vars_files:
- vars.yml
tasks:
- name: Create the Miniflux db
community.postgresql.postgresql_db:
name: miniflux
notify: Restart postgres
- name: Create the Miniflux db user
community.postgresql.postgresql_user:
db: miniflux
name: miniflux
password: "{{ miniflux.db_password }}"
notify: Restart postgres
- name: Grant Miniflux access to the db
community.postgresql.postgresql_pg_hba:
dest: /etc/postgresql/13/main/pg_hba.conf
contype: host
users: miniflux
source: samenet # TODO Can this be restricted to docker_ip?
databases: miniflux
create: true
notify: Restart postgres
- name: Install hstore
community.postgresql.postgresql_ext:
name: hstore
db: miniflux
notify: Restart postgres
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
- name: Run Miniflux
hosts: lotus-land-story
vars_files:
- vars.yml
vars:
version: 2.2.0
tasks:
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
- name: Run Miniflux
community.docker.docker_container:
restart: true
name: miniflux
image: miniflux/miniflux:{{ version }}
env:
DATABASE_URL: postgres://miniflux:{{ miniflux.db_password }}@host.docker.internal/miniflux
RUN_MIGRATIONS: "1"
CREATE_ADMIN: "1"
ADMIN_USERNAME: alpha
ADMIN_PASSWORD: "{{ miniflux.password }}"
BASE_URL: https://rss.{{ domain }}
METRICS_COLLECTOR: "1"
METRICS_ALLOWED_NETWORKS: "{{ docker_network.network.IPAM.Config[0].Subnet }}"
OAUTH2_PROVIDER: oidc
OAUTH2_CLIENT_ID: miniflux
OAUTH2_CLIENT_SECRET: "{{ miniflux.oidc_secret }}"
OAUTH2_REDIRECT_URL: https://rss.{{ domain }}/oauth2/oidc/callback
OAUTH2_OIDC_DISCOVERY_ENDPOINT: https://auth.{{ domain }}
OAUTH2_USER_CREATION: "1"
LOG_FORMAT: json
LOG_LEVEL: info
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,65 @@
- name: Set up Paperless-ngx
hosts: lotus-land-story
vars_files:
- vars.yml
vars:
version: 2.11.6
tasks:
- name: Create directories for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/paperless-ngx/{{ item }}
state: directory
mode: "0700"
loop:
- consume
- data
- export
- media
- name: Celery monitoring configuration
ansible.builtin.copy:
content: |
url_prefix = "/flower/"
dest: /mnt/lotus-land-story/paperless-ngx/flowerconfig.py
mode: "0644"
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
# https://github.com/paperless-ngx/paperless-ngx/blob/main/docker/compose/docker-compose.sqlite.yml
- name: Run Paperless-ngx
community.docker.docker_container:
restart: true
name: paperless-ngx
image: ghcr.io/paperless-ngx/paperless-ngx:{{ version }}
env:
PAPERLESS_ENABLE_FLOWER: "true"
PAPERLESS_ENABLE_HTTP_REMOTE_USER: "true"
PAPERLESS_OCR_USER_ARGS: '{"continue_on_soft_render_error": true}'
PAPERLESS_REDIS: redis://host.docker.internal:6379
PAPERLESS_TASK_WORKERS: "1"
PAPERLESS_THREADS_PER_WORKER: "1"
PAPERLESS_TIME_ZONE: America/Los_Angeles
PAPERLESS_URL: https://docs.kejadlen.dev
PAPERLESS_WEBSERVER_WORKERS: "1"
volumes:
- /mnt/lotus-land-story/paperless-ngx/data:/usr/src/paperless/data
- /mnt/lotus-land-story/paperless-ngx/media:/usr/src/paperless/media
- /mnt/lotus-land-story/paperless-ngx/export:/usr/src/paperless/export
- /mnt/lotus-land-story/paperless-ngx/consume:/usr/src/paperless/consume
- /mnt/lotus-land-story/paperless-ngx/flowerconfig.py:/usr/src/paperless/src/paperless/flowerconfig.py:ro
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,70 @@
# https://wiki.debian.org/PostgreSql
- name: Set up postgres
hosts: lotus-land-story
tasks:
- name: Install postgres
ansible.builtin.apt:
pkg:
- postgresql
- postgresql-client
state: present
- name: Make data directory
ansible.builtin.file:
path: /mnt/lotus-land-story/postgresql
state: directory
owner: postgres
mode: "0700"
- name: Set data directory
ansible.builtin.lineinfile:
dest: "/etc/postgresql/13/main/postgresql.conf"
regexp: '^#?data_directory ='
line: "data_directory = '/mnt/lotus-land-story/postgresql'"
state: present
notify: Restart postgres
- name: Allow access from localhost
community.general.ufw:
rule: allow
port: 5432
proto: tcp
from_ip: 127.0.0.1
notify: Reload ufw
# https://pgtune.leopard.in.ua/
# DB Version: 15
# OS Type: linux
# DB Type: web
# Total Memory (RAM): 4 GB
# CPUs num: 2
# Data Storage: ssd
- name: Tune postgres
ansible.builtin.lineinfile:
dest: "/etc/postgresql/13/main/postgresql.conf"
regexp: '^#?{{ item.key }} ='
line: "{{ item.key }} = {{ item.value }}"
state: present
loop: "{{ configs | dict2items }}"
vars:
configs:
max_connections: 200
shared_buffers: 1GB
effective_cache_size: 3GB
maintenance_work_mem: 256MB
checkpoint_completion_target: 0.9
wal_buffers: 16MB
default_statistics_target: 100
random_page_cost: 1.1
effective_io_concurrency: 200
work_mem: 2621kB
min_wal_size: 1GB
max_wal_size: 4GB
notify: Restart postgres
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,77 @@
- name: Set up Prometheus
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Install host exporters
ansible.builtin.apt:
pkg:
- prometheus-node-exporter
- prometheus-postgres-exporter
state: present
- name: Configure node-exporter
ansible.builtin.lineinfile:
dest: /etc/default/prometheus-node-exporter
regexp: '^ARGS='
# Include filesystems under /mnt
line: ARGS='--collector.filesystem.ignored-mount-points="^/(dev|proc|run|sys|media|var/lib/docker/.+)($|/)"'
state: present
# /usr/share/doc/prometheus-postgres-exporter/README.Debian
- name: Configure postgres-exporter
ansible.builtin.lineinfile:
dest: /etc/default/prometheus-postgres-exporter
regexp: '^DATA_SOURCE_NAME='
line: "DATA_SOURCE_NAME='user=prometheus host=/run/postgresql dbname=postgres'"
state: present
- name: Create Prometheus dir
ansible.builtin.file:
path: /mnt/lotus-land-story/prometheus
state: directory
owner: prometheus
group: prometheus
mode: "0755"
- name: Prometheus config
ansible.builtin.template:
dest: /mnt/lotus-land-story/prometheus/prometheus.yml
src: templates/prometheus.yml
owner: prometheus
group: prometheus
mode: "0600"
- name: Create Prometheus volume
community.docker.docker_volume:
name: prometheus
- name: Get prometheus user info
ansible.builtin.user:
name: prometheus
register: prometheus_user
- name: Run Prometheus
community.docker.docker_container:
# recreate: true
# restart: true
name: prometheus
image: prom/prometheus:v2.43.0
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.retention.size=5GB
- --log.format=json
restart: true
user: "{{ prometheus_user.uid }}"
groups: "{{ prometheus_user.group }}"
volumes:
- /mnt/lotus-land-story/prometheus:/etc/prometheus
- prometheus:/prometheus
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
# vim: ft=yaml.ansible

@ -0,0 +1,62 @@
# https://redis.io/docs/install/install-redis/install-redis-on-linux/#install-on-ubuntudebian
- name: Set up Redis
hosts: lotus-land-story
tasks:
- name: Add apt key
ansible.builtin.shell: >
curl -fsSL https://packages.redis.io/gpg |
sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
args:
creates: /usr/share/keyrings/redis-archive-keyring.gpg
- name: Add apt repo
ansible.builtin.apt_repository:
repo: "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb {{ ansible_distribution_release }} main"
state: present
filename: redis
- name: Install Redis
ansible.builtin.apt:
pkg: redis
state: present
# TODO Figure out how to de-duplicate this
- name: Save docker_ip
block:
- name: Get docker0 IP address
ansible.builtin.shell: ip -4 -o addr show docker0 | awk '{print $4}' # noqa: risky-shell-pipe
args:
executable: /usr/bin/bash
register: docker_ip
changed_when: docker_ip.rc != 0
- name: Save docker_ip fact
ansible.builtin.set_fact:
docker_ip:
cidr: "{{ docker_ip.stdout }}"
address: "{{ docker_ip.stdout | ansible.utils.ipaddr('address') }}"
- name: Listen on docker0 interface
ansible.builtin.lineinfile:
dest: /etc/redis/redis.conf
regexp: '^bind 127.0.0.1'
line: "bind 127.0.0.1 {{ docker_ip.address }} -::1"
state: present
notify: Restart redis
# Disable protected mode since we're only allowing access from localhost
# and docker
- name: Un-protect redis
ansible.builtin.lineinfile:
dest: /etc/redis/redis.conf
regexp: '^protected-mode '
line: "protected-mode no"
state: present
notify: Restart redis
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,4 @@
ansible
netaddr
pip-tools
psycopg2

@ -0,0 +1,50 @@
#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile --resolver=backtracking requirements.in
#
ansible==7.4.0
# via -r requirements.in
ansible-core==2.14.4
# via ansible
build==0.10.0
# via pip-tools
cffi==1.15.1
# via cryptography
click==8.1.3
# via pip-tools
cryptography==40.0.1
# via ansible-core
jinja2==3.1.2
# via ansible-core
markupsafe==2.1.2
# via jinja2
netaddr==0.8.0
# via -r requirements.in
packaging==23.0
# via
# ansible-core
# build
pip-tools==6.12.3
# via -r requirements.in
psycopg2==2.9.6
# via -r requirements.in
pycparser==2.21
# via cffi
pyproject-hooks==1.0.0
# via build
pyyaml==6.0
# via ansible-core
resolvelib==0.8.1
# via ansible-core
tomli==2.0.1
# via
# build
# pyproject-hooks
wheel==0.40.0
# via pip-tools
# The following packages are considered to be unsafe in a requirements file:
# pip
# setuptools

@ -0,0 +1,25 @@
- name: Restart postgres
ansible.builtin.service:
name: postgresql
state: restarted
- name: Restart docker
ansible.builtin.service:
name: docker
state: restarted
- name: Reload ufw
community.general.ufw:
state: reloaded
- name: Restart ssh
ansible.builtin.service:
name: ssh
state: restarted
- name: Restart redis
ansible.builtin.service:
name: redis-server
state: restarted
# vim: ft=yaml.ansible

@ -0,0 +1,58 @@
# https://docs.searxng.org/admin/installation-docker.html#installation-docker
- name: Set up SearXNG
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Create directory for volume mounting
ansible.builtin.file:
path: /mnt/lotus-land-story/searxng
state: directory
mode: "0700"
- name: Write settings.yml
ansible.builtin.copy:
dest: /mnt/lotus-land-story/searxng/settings.yml
content: |
use_default_settings: true
server:
secret_key: {{ searxng.secret_key }}
search:
autocomplete: duckduckgo
outgoing:
enable_http: false
engines:
- name: brave
disabled: true
- name: stract
disabled: false
weight: 2
mode: "0644"
- name: Get docker network
community.docker.docker_network:
name: lotus_land_story
register: docker_network
- name: Run SearXNG
community.docker.docker_container:
restart: true
name: searxng
image: searxng/searxng:2024.1.17-7c80807bb
env:
SEARXNG_BASE_URL: https://search.{{ domain }}
volumes:
- /mnt/lotus-land-story/searxng/settings.yml:/etc/searxng/settings.yml
restart_policy: unless-stopped
networks:
- name: lotus_land_story
etc_hosts:
host.docker.internal: host-gateway
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,140 @@
{
servers {
metrics
}
}
# https://www.authelia.com/integration/proxies/caddy/#trusted-proxies
(trusted_proxy_list) {
trusted_proxies {{ trusted_proxies }}
}
:2019 {
metrics
}
loki.{{ domain }} {
reverse_proxy loki:3100
}
rss.{{ domain }} {
reverse_proxy miniflux:8080
}
prometheus.{{ domain }} {
reverse_proxy prometheus:9090
}
grafana.{{ domain }} {
reverse_proxy grafana:3000
}
{% for domain in ("kejadlen.dev", "chislan.family") %}
auth.{{ domain }} {
reverse_proxy authelia:9091 {
import trusted_proxy_list
}
}
{% endfor %}
search.{{ domain }} {
log
handle /opensearch.xml {
reverse_proxy searxng:8080
}
handle /static/* {
reverse_proxy searxng:8080
}
handle /autocompleter {
reverse_proxy searxng:8080
}
handle {
forward_auth authelia:9091 {
uri /api/verify?rd=https://auth.{{ domain }}
# copy_headers Remote-User
# This import needs to be included if you're relying on a trusted proxies configuration.
import trusted_proxy_list
}
reverse_proxy searxng:8080 {
import trusted_proxy_list
}
}
}
# {{ hledger.subdomain }}.{{ domain }} {
# forward_auth authelia:9091 {
# uri /api/verify?rd=https://auth.{{ domain }}
# # copy_headers Remote-User Remote-Groups Remote-Name Remote-Email
# ## This import needs to be included if you're relying on a trusted proxies configuration.
# import trusted_proxy_list
# }
# reverse_proxy hledger:5000 {
# import trusted_proxy_list
# }
# }
books.{{ domain }} {
forward_auth authelia:9091 {
uri /api/verify?rd=https://auth.{{ domain }}
copy_headers Remote-User
## This import needs to be included if you're relying on a trusted proxies configuration.
import trusted_proxy_list
}
reverse_proxy calibre-web:8083 {
import trusted_proxy_list
}
}
docs.chislan.family {
forward_auth authelia:9091 {
uri /api/verify?rd=https://auth.chislan.family
copy_headers Remote-User
# This import needs to be included if you're relying on a trusted proxies configuration.
import trusted_proxy_list
}
reverse_proxy paperless-ngx:8000 {
import trusted_proxy_list
}
redir /flower /flower/
handle /flower/* {
reverse_proxy paperless-ngx:5555
}
}
docs.{{ domain }} {
forward_auth authelia:9091 {
uri /api/verify?rd=https://auth.{{ domain }}
copy_headers Remote-User
# This import needs to be included if you're relying on a trusted proxies configuration.
import trusted_proxy_list
}
reverse_proxy paperless-ngx:8000 {
import trusted_proxy_list
}
redir /flower /flower/
handle /flower/* {
reverse_proxy paperless-ngx:5555
}
}
logs.{{ domain }} {
reverse_proxy parseable:8000
}
# vim: ts=4

@ -0,0 +1,107 @@
# https://www.authelia.com/integration/openid-connect/frequently-asked-questions/#how-do-i-generate-client-secrets
theme: auto
log:
level: debug
format: json
telemetry:
metrics:
enabled: true
authentication_backend:
file:
path: /config/users_database.yml
access_control:
default_policy: deny
rules:
- domain: docs.kejadlen.dev
policy: two_factor
subject:
- group:family
- domain: "*.chislan.family"
policy: two_factor
subject:
- group:family
- domain: "*.kejadlen.dev"
policy: two_factor
subject:
- user:alpha
identity_validation:
reset_password:
jwt_secret: {{ authelia.jwt_secret }}
session:
secret: {{ authelia.session_secret }}
cookies:
- domain: {{ domain }}
authelia_url: https://auth.{{ domain }}
# default_redirection_url: https://www.{{ domain }}
- domain: chislan.family
authelia_url: https://auth.chislan.family
storage:
encryption_key: {{ authelia.storage_encryption_key }}
local:
path: /config/db.sqlite3
notifier:
smtp:
username: apikey
password: {{ authelia.smtp_password }}
address: smtp://smtp.sendgrid.net:25
sender: authelia@kejadlen.dev
identity_providers:
oidc:
jwks:
- key: |
{{ authelia.oidc_private_key | indent(10) }}
clients:
- client_id: grafana
client_name: Grafana
client_secret: $argon2id$v=19$m=65536,t=3,p=4$bHcAAorVdHuZzuz53WfAQA$x+pIDTo6SsGyY9JD4OZ7dT6pkEcPf8Yh6Yb7DXco8aQ
public: false
redirect_uris:
- https://grafana.{{ domain }}/login/generic_oauth
scopes:
- openid
- profile
- groups
- email
- client_id: tailscale
client_name: Tailscale
client_secret: $argon2id$v=19$m=65536,t=3,p=4$RivlSdV1WE/NLfd3Pzrubw$ljSvHj9sb0byolv7fk5G3nL415nS7Ze2RMASwPgfBX0
redirect_uris:
- https://login.tailscale.com/a/oauth_response
scopes:
- openid
- email
- profile
- client_id: gitea
client_name: Gitea
client_secret: $argon2id$v=19$m=65536,t=3,p=4$bMcI49gLNfk6ovxXbg9jFQ$qE/G5lDzkFebKopyGv1FOqkiA64HhRJ9kq+TJCR0HM0
public: false
redirect_uris:
- https://git.{{ domain }}/user/oauth2/authelia/callback
scopes:
- openid
- email
- profile
- client_id: miniflux
client_name: Miniflux
client_secret: $argon2id$v=19$m=65536,t=3,p=4$tK5aBDAHOmNsEZzSYS88eg$z6tkZVIzB0x6RQjCM0v34lguS454lcQd/Sm0+xRfg7w
public: false
redirect_uris:
- https://rss.{{ domain }}/oauth2/oidc/callback
scopes:
- openid
- email
- profile

@ -0,0 +1,16 @@
users:
alpha:
disabled: false
displayname: "Alpha"
password: "$argon2id$v=19$m=65536,t=3,p=4$JHtyy/vVD+37neJUjy5Shw$6GODmDOXW/v7cfhqwuEp30bVSCWLT5R3OEe/Gi5FGX0" # yamllint disable-line rule:line-length
email: alpha@kejadlen.dev
groups:
- admins
- family
lydia:
disabled: false
displayname: "Lydia"
password: "$argon2id$v=19$m=65536,t=3,p=4$ALAevUUnRK1hcwf5jp1OkA$aSwuYjEMrbtcAGfhsclL901QKF5S+6u42NQFG7S8DkI" # yamllint disable-line rule:line-length
email: lydia.islan@gmail.com
groups:
- family

@ -0,0 +1,6 @@
{
"data-root": "/mnt/lotus-land-story/docker",
"log-driver": "local",
"log-opts": {"max-size": "10m", "max-file": "3"},
"metrics-addr": "{{ docker_ip.address }}:9323"
}

@ -0,0 +1 @@
deb [arch="{{ arch.stdout }}" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable

@ -0,0 +1,73 @@
global:
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
# external_labels:
# monitor: 'codelab-monitor'
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:9090']
- job_name: node
static_configs:
- targets: ['host.docker.internal:9100']
- job_name: docker
static_configs:
- targets: ['host.docker.internal:9323']
- job_name: caddy
static_configs:
- targets: ['caddy:2019']
- job_name: grafana
static_configs:
- targets: ['grafana:3000']
- job_name: postgres
static_configs:
- targets: ['host.docker.internal:9187']
- job_name: promtail
static_configs:
- targets: ['promtail:9080']
- job_name: miniflux
static_configs:
- targets: ['miniflux:8080']
# - job_name: woodpecker
# bearer_token: {{ woodpecker.api_token }}
# static_configs:
# - targets: ['woodpecker-server:8000']
- job_name: authelia
static_configs:
- targets: ['authelia:9959']
# - job_name: linode
# linode_sd_configs:
# - authorization:
# credentials: {{ prometheus.linode_api_token }}
# relabel_configs:
# # Use the public IPv6 address and port 9100 to scrape the target.
# - source_labels: [__meta_linode_public_ipv6]
# target_label: __address__
# replacement: "[$1]:9100"
- job_name: akkoma
scheme: https
authorization:
credentials: {{ prometheus.akkoma.access_token }}
metrics_path: /api/v1/akkoma/metrics
static_configs:
- targets:
- {{ prometheus.akkoma.target }}
- job_name: paperless
metrics_path: /flower/metrics
static_configs:
- targets: ['paperless-ngx:5555']
# vim: ft=yaml.ansible

@ -0,0 +1,55 @@
- name: Set up Woodpecker
hosts: lotus-land-story
vars_files:
- vars.yml
tasks:
- name: Make /mnt/lotus-land-story/woodpecker
ansible.builtin.file:
path: /mnt/lotus-land-story/woodpecker
state: directory
mode: "0755"
# owner: 65532
# group: 65532
- name: Run Woodpecker server
community.docker.docker_container:
name: woodpecker-server
image: woodpeckerci/woodpecker-server:v0.15.11
state: absent
restart: true
env:
# WOODPECKER_OPEN: "true"
WOODPECKER_HOST: "{{ woodpecker.host }}"
WOODPECKER_AGENT_SECRET: "{{ woodpecker.secret }}"
WOODPECKER_GITEA: "true"
WOODPECKER_GITEA_URL: "{{ woodpecker.gitea.url }}"
WOODPECKER_GITEA_CLIENT: "{{ woodpecker.gitea.client }}"
WOODPECKER_GITEA_SECRET: "{{ woodpecker.gitea.secret }}"
WOODPECKER_ADMIN: "alpha"
volumes:
- /mnt/lotus-land-story/woodpecker:/var/lib/woodpecker
restart_policy: unless-stopped
networks:
- name: lotus_land_story
- name: Run Woodpecker agent
community.docker.docker_container:
name: woodpecker-agent
image: woodpeckerci/woodpecker-agent:v0.15.11
state: absent
restart: true
env:
WOODPECKER_SERVER: woodpecker-server:9000
WOODPECKER_AGENT_SECRET: "{{ woodpecker.secret }}"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
restart_policy: unless-stopped
networks:
- name: lotus_land_story
handlers:
- name: Import restarts
ansible.builtin.import_tasks: restarts.yml
# vim: ft=yaml.ansible

@ -0,0 +1,51 @@
terraform {
cloud {
organization = "kejadlen"
workspaces {
name = "boxen"
}
}
required_providers {
linode = {
source = "linode/linode"
version = "2.18.0"
}
}
}
variable "soa_email" {
type = string
nullable = false
}
data "linode_instances" "ramble_hard" {
filter {
name = "label"
values = ["ramble-hard"]
}
}
data "linode_instances" "lotus_land_story" {
filter {
name = "label"
values = ["lotus-land-story"]
}
}
locals {
ramble_hard = data.linode_instances.ramble_hard.instances.0
lotus_land_story = data.linode_instances.lotus_land_story.instances.0
}
resource "linode_domain" "kejadlen_dev" {
type = "master"
domain = "kejadlen.dev"
soa_email = var.soa_email
}
resource "linode_domain" "chislan_family" {
type = "master"
domain = "chislan.family"
soa_email = var.soa_email
}

@ -0,0 +1,4 @@
- https://github.com/mitchellh/nixos-config
- Download the 64-bit ARM image: https://nixos.org/download.html
- Follow the instructions in the manual: https://nixos.org/manual/nixos/stable/index.html

@ -0,0 +1,16 @@
# On Fire Within
## Setup
- [Installing Hass.io](https://www.home-assistant.io/hassio/installation/)
1. `ansible-playbook playbooks/pi/bootstrap.yml`
1. `ansible-playbook on-fire-within/bootstrap.yml`
1. `curl -fsSL get.docker.com | sh`
- `sudo usermod -aG docker alpha`
1. `curl -sL "https://raw.githubusercontent.com/home-assistant/hassio-installer/master/hassio_install.sh" | bash -s -- -m raspberrypi4`
1. `ansible-playbook on-fire-within/main.yml`
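
The Docker install step above (`curl -fsSL get.docker.com | sh` plus `sudo usermod -aG docker alpha`) is run by hand; a rough Ansible sketch of the same two commands, assuming the `on-fire-within` host group used elsewhere in this repo, might look like:

```yaml
# Hypothetical sketch, not part of these playbooks: the manual Docker install
# step from the Setup list above, expressed as idempotent tasks.
- hosts: on-fire-within
  become: true
  tasks:
    - name: Install Docker via the convenience script
      ansible.builtin.shell: curl -fsSL get.docker.com | sh
      args:
        creates: /usr/bin/docker   # skip once the docker binary exists
    - name: Add alpha to the docker group
      ansible.builtin.user:
        name: alpha
        groups: docker
        append: true               # keep alpha's existing groups
```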
## Notes
- `/usr/share/hassio`

@ -0,0 +1,33 @@
# https://www.home-assistant.io/hassio/installation/
- hosts: on-fire-within
become: yes
tasks:
# Forgot what I need this for...
- name: install software-properties-common
apt: name=software-properties-common
- name: install other dependencies for hass.io
apt:
name:
- apparmor-utils
- apt-transport-https
- avahi-daemon
- ca-certificates
- curl
- dbus
- jq
- network-manager
- socat
# https://www.home-assistant.io/integrations/bluetooth_tracker/
- bluetooth
- libbluetooth-dev
update_cache: yes
- service:
name: ModemManager
enabled: false
# homekit
- name: install dependencies for homekit
apt: name=libavahi-compat-libdnssd-dev

@ -0,0 +1,313 @@
- import_playbook: pi.yml
- import_playbook: tailscale.yml
- import_playbook: hass-io.yml
- import_playbook: pi-hole.yml
- hosts: on-fire-within
become: true
vars_files:
- vars.private
tasks:
- name: Set authorized keys from GitHub
ansible.posix.authorized_key:
user: alpha
state: present
key: https://github.com/kejadlen.keys
- name: Install dependencies
ansible.builtin.apt:
name:
- git
- vim
# Needed for Docker stuff
- docker-compose
- python3-pip
- python-backports-shutil-get-terminal-size
- python-backports.ssl-match-hostname
- name: Install python docker packages
pip:
name:
- docker
- docker-compose
state: latest
- name: Create necessary dirs
file:
path: "{{ item }}"
state: directory
with_items:
- /etc/minio
- /etc/mitmproxy
- /etc/smokeping
- /etc/smokeping/config
- /etc/smokeping/data
- /etc/traefik
- /mnt/mushu/minio
- /mnt/mushu/syncthing
- name: Mount USB drive
ansible.posix.mount:
path: /mnt/mushu
src: /dev/sda
fstype: ext4
state: mounted
- name: Configure ddclient
ansible.builtin.copy:
content: |
daemon=300
use=web
ssl=yes
protocol=googledomains
{% for host in ddclient_hosts %}
login={{ host.login }}, password={{ host.password }} {{ host.host }}
{% endfor %}
dest: /etc/ddclient/ddclient.conf
mode: "0600"
vars:
ddclient_hosts: "{{ ddclient.hosts }}"
notify: Restart ddclient
- name: Traefik static configuration
ansible.builtin.copy:
content: |
providers:
docker:
exposedByDefault: false
file:
filename: /etc/traefik/dynamic_conf.toml
watch: true
entryPoints:
http:
address: ":80"
https:
address: ":443"
certificatesResolvers:
le:
acme:
email: {{ email }}
storage: "/etc/traefik/acme.json"
httpChallenge:
entryPoint: http
api:
insecure: true
accessLog: {}
dest: /etc/traefik/traefik.yml
mode: 0600
# https://docs.syncthing.net/users/faq.html#inotify-limits
- name: Increase inotify limit for syncthing
ansible.builtin.lineinfile:
path: /etc/sysctl.conf
regexp: '^fs.inotify.max_user_watches='
line: fs.inotify.max_user_watches=204800
# The docker_compose module overwrites our existing variables, so this is a
# workaround to save off ones that we need later on in the playbook.
#
# https://github.com/ansible/ansible/issues/33960
- name: Save original host facts
ansible.builtin.set_fact:
"{{ item }}_original": "{{ lookup('vars', item) }}"
with_items:
- minio
- traefik
tags:
- debug
- name: Docker ALL the things!
tags: [docker]
community.docker.docker_compose:
project_name: on-fire-within
pull: true
definition:
version: '2'
services:
minio:
image: kejadlen/minio:latest
container_name: minio
environment:
MINIO_ACCESS_KEY: "{{ minio.access_key }}"
MINIO_SECRET_KEY: "{{ minio.secret_key }}"
volumes:
- /etc/minio:/root/.minio
- /mnt/mushu/minio:/data
user: 0:0 # root
labels:
- traefik.enable=true
- traefik.http.routers.minio.rule=Host(`{{ traefik.host_rules.minio }}`)
- traefik.http.routers.minio.tls=true
- traefik.http.routers.minio.tls.certresolver=le
- traefik.http.services.minio.loadbalancer.server.port=9000
# mitmproxy:
# image: mitmproxy/mitmproxy:latest-ARMv7
# container_name: mitmproxy
# command: mitmweb --web-iface ""
# volumes:
# - /etc/mitmproxy:/home/mitmproxy/.mitmproxy
# labels:
# - traefik.enable=true
# - traefik.tcp.routers.mitmproxy.rule=HostSNI(`{{ traefik.host_rules.mitmproxy }}`)
# - traefik.tcp.routers.mitmproxy.tls.passthrough=true
# - traefik.tcp.services.mitmproxy.loadbalancer.server.port=8080
# - traefik.http.routers.mitmproxy-web.rule=Host(`{{ traefik.host_rules.mitmproxy_web }}`)
# - traefik.http.routers.mitmproxy-web.tls.certresolver=le
# - traefik.http.services.mitmproxy-web.loadbalancer.server.port=8081
pihole:
image: pihole/pihole:2024.06.0
container_name: pihole
ports:
- 53:53/tcp
- 53:53/udp
environment:
TZ: America/Los_Angeles
VIRTUAL_HOST: "{{ pihole.host }}"
WEBPASSWORD: "{{ pihole.password }}"
LOCAL_IPV4: "{{ ansible_default_ipv4.address }}"
volumes:
- /etc/pihole:/etc/pihole
- /etc/dnsmasq.d:/etc/dnsmasq.d
dns:
- 127.0.0.1
- 1.1.1.1
labels:
- traefik.enable=true
- traefik.http.routers.pihole.rule=Host(`{{ traefik.host_rules.pihole }}`)
- traefik.http.routers.pihole.tls=true
- traefik.http.routers.pihole.tls.certresolver=le
- traefik.http.services.pihole.loadbalancer.server.port=80
restart: unless-stopped
syncthing:
image: syncthing/syncthing:1.23.5
container_name: syncthing
ports:
- 22000:22000/tcp # TCP file transfers
- 22000:22000/udp # QUIC file transfers
- 21027:21027/udp # Receive local discovery broadcasts
volumes:
- /etc/syncthing:/var/syncthing
- /mnt/mushu/syncthing:/sync
environment:
PUID: 0
PGID: 0
labels:
- traefik.enable=true
- traefik.http.routers.syncthing.rule=Host(`{{ traefik.host_rules.syncthing }}`)
- traefik.http.routers.syncthing.tls=true
- traefik.http.routers.syncthing.tls.certresolver=le
- traefik.http.services.syncthing.loadbalancer.server.port=8384
restart: unless-stopped
smokeping:
image: lscr.io/linuxserver/smokeping:arm32v7-2.7.3-r9-ls36
container_name: smokeping
environment:
- MASTER_URL=https://smokeping.kejadlen.dev/smokeping/
volumes:
- /etc/smokeping/config:/config
- /etc/smokeping/data:/data
labels:
- traefik.enable=true
- traefik.http.routers.smokeping.rule=Host(`smokeping.kejadlen.dev`)
- traefik.http.routers.smokeping.tls=true
- traefik.http.routers.smokeping.tls.certresolver=le
- traefik.http.services.smokeping.loadbalancer.server.port=80
restart: unless-stopped
traefik:
image: traefik:v2.10.4
container_name: traefik
ports:
- 80:80
- 8080:8080
- 443:443
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /etc/traefik:/etc/traefik
labels:
- traefik.enable=true
- traefik.http.middlewares.auth.basicauth.users=alpha:{{ traefik.password | password_hash("md5") | replace("$", "$$") }}
- traefik.http.routers.traefik.rule=Host(`{{ traefik.host_rules.traefik }}`)
- traefik.http.routers.traefik.tls=true
- traefik.http.routers.traefik.tls.certresolver=le
- traefik.http.routers.traefik.middlewares=auth
- traefik.http.routers.traefik.service=api@internal
restart: unless-stopped
- name: Route Home Assistant through Traefik
block:
# - shell: ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+' | head -n 1
- shell: docker network inspect on-fire-within_default | jq --raw-output .[0].IPAM.Config[0].Gateway
register: docker_gateway_result
- shell: docker network inspect on-fire-within_default | jq --raw-output .[0].IPAM.Config[0].Subnet
register: docker_subnet_result
- set_fact:
docker_gateway: "{{ docker_gateway_result.stdout | trim }}"
docker_subnet: "{{ docker_subnet_result.stdout | trim }}"
- copy:
content: |
[http.routers]
[http.routers.appdaemon]
rule = "Host(`{{ traefik_original.host_rules.appdaemon }}`)"
service = "appdaemon"
[http.routers.appdaemon.tls]
certResolver = "le"
[http.routers.hassio]
rule = "Host(`{{ traefik_original.host_rules.hassio }}`)"
service = "hassio"
[http.routers.hassio.tls]
certResolver = "le"
[http.services]
[http.services.appdaemon.loadBalancer]
[[http.services.appdaemon.loadBalancer.servers]]
url = "http://{{ docker_gateway }}:5050/"
[http.services.hassio.loadBalancer]
[[http.services.hassio.loadBalancer.servers]]
url = "http://{{ docker_gateway }}:8123/"
dest: /etc/traefik/dynamic_conf.toml
mode: 0600
notify: Restart Traefik
tags:
- debug
- name: Ship logs via rsyslog
ansible.builtin.copy:
content: |
*.* action(type="omfwd"
protocol="tcp"
target="lotus-land-story"
port="514"
Template="RSYSLOG_SyslogProtocol23Format"
TCP_Framing="octet-counted"
KeepAlive="on")
dest: /etc/rsyslog.d/50-promtail.conf
mode: "0644"
notify: Restart rsyslog
handlers:
- name: Restart Traefik
docker_container:
name: traefik
restart: yes
ignore_errors: yes
- name: Restart Home Assistant
docker_container:
name: homeassistant
restart: yes
ignore_errors: yes
- name: Restart rsyslog
ansible.builtin.service:
name: rsyslog
state: restarted
# vim: ft=yaml.ansible

@ -0,0 +1,66 @@
- hosts: on-fire-within
become: yes
vars_files:
- vars.private
tasks:
# Workaround for https://github.com/pi-hole/docker-pi-hole/issues/1048
# - https://github.com/pi-hole/docker-pi-hole/issues/1042#issuecomment-1086728157
# - https://github.com/pi-hole/docker-pi-hole/issues/1043#issuecomment-1086936352
- name: Work around a Docker libseccomp issue w/Pi-Hole
block:
- apt_key:
keyserver: keyserver.ubuntu.com
id: "{{ item }}"
loop:
- 0E98404D386FA1D9
- 6ED0E7B82643E131
- apt_repository:
repo: deb http://deb.debian.org/debian buster-backports main
filename: buster-backports
state: present
- shell: apt-cache policy libseccomp2 | grep buster-backports -B1 | head -n1 | sed -e 's/^\s*\**\s*\(\S*\).*/\1/'
register: libseccomp2_version
- apt:
update_cache: yes
name: libseccomp2={{ libseccomp2_version.stdout_lines[0] }}
# https://docs.pi-hole.net/guides/dns/unbound/
- name: Set up Pi-hole as recursive DNS server
block:
- name: Install unbound
apt:
name: unbound
- name: Configure unbound
ansible.builtin.copy:
src: unbound.conf
dest: /etc/unbound/unbound.conf.d/pi-hole.conf
notify: Restart unbound
- name: Use the same limit for FTL as unbound
ansible.builtin.lineinfile:
path: /etc/dnsmasq.d/99-edns.conf
line: edns-packet-max=1232
create: true
- name: Disable resolvconf.conf entry for unbound
block:
- name: Disable unbound-resolvconf.service
service:
name: unbound-resolvconf
enabled: false
- name: Disable resolvconf_resolvers.conf from being generated
ansible.builtin.replace:
path: /etc/resolvconf.conf
regexp: '^unbound_conf='
replace: '#unbound_conf='
- name: Remove resolvconf_resolvers.conf
ansible.builtin.file:
path: /etc/unbound/unbound.conf.d/resolvconf_resolvers.conf
state: absent
notify: Restart unbound
handlers:
- name: Restart unbound
ansible.builtin.service:
name: unbound
state: restarted

@ -0,0 +1,44 @@
# https://www.raspberrypi.org/documentation/configuration/security.md
- hosts: on-fire-within
become: yes
tasks:
- name: disable ssh password logins
lineinfile:
path: /etc/ssh/sshd_config
regexp: '^(#\s*)?{{ item }} '
line: "{{ item }} no"
notify: reload ssh
with_items:
- ChallengeResponseAuthentication
- PasswordAuthentication
- UsePAM
- name: disable pi user
user:
name: pi
password: "!"
- name: install fail2ban
package:
name: fail2ban
state: present
- name: create jail.local
copy:
content: |
[sshd]
enabled = true
dest: /etc/fail2ban/jail.local
notify: reload fail2ban
handlers:
- name: reload ssh
service:
name: ssh
state: reloaded
- name: reload fail2ban
service:
name: fail2ban
state: reloaded

@ -0,0 +1,67 @@
# https://tailscale.com/download/linux/rpi
# TODO Conditionalize this on the OS and merge into ../playbooks/tailscale.yml
- name: Install Tailscale
hosts: on-fire-within
become: true
tasks:
# sudo apt-get install apt-transport-https
- name: Install apt-transport-https
ansible.builtin.package:
name: apt-transport-https
state: present
# curl -fsSL https://pkgs.tailscale.com/stable/raspbian/buster.gpg | sudo apt-key add -
- name: Add Tailscale signing key
ansible.builtin.apt_key:
url: https://pkgs.tailscale.com/stable/raspbian/buster.gpg
state: present
# curl -fsSL https://pkgs.tailscale.com/stable/raspbian/buster.list | sudo tee /etc/apt/sources.list.d/tailscale.list
- name: Add Tailscale apt repo
ansible.builtin.apt_repository:
repo: deb https://pkgs.tailscale.com/stable/raspbian buster main
state: present
filename: tailscale
# sudo apt-get update
- name: Update apt-get
ansible.builtin.apt:
update_cache: true
# sudo apt-get install tailscale
- name: Install Tailscale
ansible.builtin.package:
name: tailscale
state: present
- name: Restrict tailscaled logging
hosts: on-fire-within
become: true
tasks:
- name: Create systemd override dir for tailscaled
ansible.builtin.file:
path: /etc/systemd/system/tailscaled.service.d
state: directory
mode: "0644"
- name: Create systemd override
ansible.builtin.copy:
content: |
[Service]
LogLevelMax=notice
dest: /etc/systemd/system/tailscaled.service.d/override.conf
mode: "0644"
notify:
- Restart Tailscale
handlers:
- name: Restart Tailscale
ansible.builtin.systemd:
name: tailscaled
state: restarted
daemon_reload: true

@ -0,0 +1,66 @@
server:
# If no logfile is specified, syslog is used
# logfile: "/var/log/unbound/unbound.log"
verbosity: 1
interface: 127.0.0.1
port: 5335
do-ip4: yes
do-udp: yes
do-tcp: yes
# May be set to yes if you have IPv6 connectivity
do-ip6: no
# You want to leave this to no unless you have *native* IPv6. With 6to4 and
# Teredo tunnels your web browser should favor IPv4 for the same reasons
prefer-ip6: no
# Use this only when you downloaded the list of primary root servers!
# If you use the default dns-root-data package, unbound will find it automatically
#root-hints: "/var/lib/unbound/root.hints"
# Trust glue only if it is within the server's authority
harden-glue: yes
# Require DNSSEC data for trust-anchored zones, if such data is absent, the zone becomes BOGUS
harden-dnssec-stripped: yes
# Don't use Capitalization randomization as it is known to cause DNSSEC issues sometimes
# see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details
use-caps-for-id: no
# Reduce EDNS reassembly buffer size.
# IP fragmentation is unreliable on the Internet today, and can cause
# transmission failures when large DNS messages are sent via UDP. Even
# when fragmentation does work, it may not be secure; it is theoretically
# possible to spoof parts of a fragmented DNS message, without easy
# detection at the receiving end. Recently, there was an excellent study
# >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<
# by Axel Koolhaas, and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)
# in collaboration with NLnet Labs explored DNS using real world data from the
# RIPE Atlas probes and the researchers suggested different values for
# IPv4 and IPv6 and in different scenarios. They advise that servers should
# be configured to limit DNS messages sent over UDP to a size that will not
# trigger fragmentation on typical network links. DNS servers can switch
# from UDP to TCP when a DNS response is too big to fit in this limited
# buffer size. This value has also been suggested in DNS Flag Day 2020.
edns-buffer-size: 1232
# Perform prefetching of close to expired message cache entries
# This only applies to domains that have been frequently queried
prefetch: yes
# One thread should be sufficient, can be increased on beefy machines. In reality for most users running on small networks or on a single machine, it should be unnecessary to seek performance enhancement by increasing num-threads above 1.
num-threads: 1
# Ensure kernel buffer is large enough to not lose messages in traffic spikes
so-rcvbuf: 1m
# Ensure privacy of local IP ranges
private-address: 192.168.0.0/16
private-address: 169.254.0.0/16
private-address: 172.16.0.0/12
private-address: 10.0.0.0/8
private-address: fd00::/8
private-address: fe80::/10

@ -0,0 +1,60 @@
# https://tailscale.com/download/linux/debian-bullseye
- name: Install Tailscale
hosts: all
become: true
tasks:
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg | sudo tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
- name: Download Tailscale package signing key
ansible.builtin.get_url:
url: https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg
dest: /usr/share/keyrings/tailscale-archive-keyring.gpg
mode: "0644"
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.tailscale-keyring.list | sudo tee /etc/apt/sources.list.d/tailscale.list
- name: Add Tailscale repository
ansible.builtin.apt_repository:
repo: deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/debian bullseye main
state: present
# sudo apt-get update
- name: Update apt-get
ansible.builtin.apt:
update_cache: true
# sudo apt-get install tailscale
- name: Install Tailscale
ansible.builtin.package:
name: tailscale
state: present
- name: Restrict tailscaled logging
hosts: all
become: true
tasks:
- name: Create systemd override dir for tailscaled
ansible.builtin.file:
path: /etc/systemd/system/tailscaled.service.d
state: directory
mode: "0644"
- name: Create systemd override
ansible.builtin.copy:
content: |
[Service]
LogLevelMax=notice
dest: /etc/systemd/system/tailscaled.service.d/override.conf
mode: "0644"
notify:
- Restart Tailscale
handlers:
- name: Restart Tailscale
ansible.builtin.systemd:
name: tailscaled
state: restarted
daemon_reload: true
# vim: ft=yaml.ansible

@ -0,0 +1,5 @@
- hosts: ramble-hard
vars_files:
- ../vars.private
tasks:

@ -0,0 +1,65 @@
---
- name: Set up Lets Encrypt
hosts: ramble-hard
vars_files:
- ../vars.private
tasks:
- apt:
update_cache: yes
- package:
name:
- certbot
- nginx
- service:
name: nginx
state: stopped
- command: >
certbot certonly --standalone --preferred-challenges http
-n --agree-tos -m {{ lets_encrypt.email }}
-d {{ tld }}
vars:
tld: "{{ item.value['subdomain'] | default(item.key) }}.{{ domain }}"
loop: "{{ apps | dict2items }}"
- service:
name: nginx
state: started
- template:
src: renew-certs
dest: /etc/cron.daily/renew-certs
mode: +x
- name: Set up nginx proxies
hosts: ramble-hard
vars_files:
- ../vars.private
tasks:
- template:
src: nginx.conf
dest: /etc/nginx/sites-available/{{ item.key }}.conf
vars:
server_name: "{{ item.value['subdomain'] | default(item.key) }}.{{ domain }}"
port: "{{ item.value['port'] }}"
loop: "{{ apps | dict2items }}"
notify: Restart nginx
- file:
src: /etc/nginx/sites-available/{{ item.key }}.conf
dest: /etc/nginx/sites-enabled/{{ item.key }}.conf
state: link
loop: "{{ apps | dict2items }}"
notify: Restart nginx
handlers:
- name: Restart nginx
service:
name: nginx
state: restarted

@ -0,0 +1,37 @@
server {
server_name {{ server_name }};
listen 80;
listen [::]:80;
location / {
return https://$server_name$request_uri;
}
}
server {
server_name {{ server_name }};
listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_trusted_certificate /etc/letsencrypt/live/{{ server_name }}/chain.pem;
ssl_certificate /etc/letsencrypt/live/{{ server_name }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ server_name }}/privkey.pem;
ssl_stapling on;
ssl_stapling_verify on;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 10m;
location / {
proxy_pass http://127.0.0.1:{{ port }};
}
}

@ -0,0 +1,2 @@
#!/bin/sh
certbot renew -w /var/lib/letsencrypt/ --pre-hook "systemctl stop nginx" --post-hook "systemctl start nginx"

@ -0,0 +1,118 @@
# https://docs.pleroma.social/backend/installation/otp_en/
---
- hosts: ramble-hard
become: true
tasks:
# arch="$(uname -m)";if [ "$arch" = "x86_64" ];then arch="amd64";elif [ "$arch" = "armv7l" ];then arch="arm";elif [ "$arch" = "aarch64" ];then arch="arm64";else echo "Unsupported arch: $arch">&2;fi;if getconf GNU_LIBC_VERSION>/dev/null;then libc_postfix="";elif [ "$(ldd 2>&1|head -c 9)" = "musl libc" ];then libc_postfix="-musl";elif [ "$(find /lib/libc.musl*|wc -l)" ];then libc_postfix="-musl";else echo "Unsupported libc">&2;fi;echo "$arch$libc_postfix" # noqa yaml[line-length]
- shell: |
arch="$(uname -m)"
if [ "$arch" = "x86_64" ]; then
arch="amd64";
elif [ "$arch" = "armv7l" ]; then
arch="arm";
elif [ "$arch" = "aarch64" ]; then
arch="arm64";
else
echo "Unsupported arch: $arch">&2;
fi;
if getconf GNU_LIBC_VERSION>/dev/null; then
libc_postfix="";
elif [ "$(ldd 2>&1|head -c 9)" = "musl libc" ]; then
libc_postfix="-musl";
elif [ "$(find /lib/libc.musl*|wc -l)" ]; then
libc_postfix="-musl";
else
echo "Unsupported libc">&2;
fi;
echo "$arch$libc_postfix"
register: arch_result
- set_fact:
pleroma_flavour: "{{ arch_result.stdout | trim }}"
- apt:
update_cache: true
# apt install curl unzip libncurses5 postgresql postgresql-contrib nginx certbot libmagic-dev
# apt install imagemagick ffmpeg libimage-exiftool-perl
# apt install postgresql-11-rum
- package:
name:
- curl
- unzip
- libncurses5
- postgresql
- postgresql-contrib
- nginx
- certbot
- libmagic-dev
- imagemagick
- ffmpeg
- libimage-exiftool-perl
# - postgresql-13-rum
notify:
- Restart postgres
# Create a Pleroma user
# adduser --system --shell /bin/false --home /opt/pleroma pleroma
- user:
name: pleroma
home: /opt/pleroma
shell: /bin/false
system: true
# Clone the release build into a temporary directory and unpack it
# su pleroma -s $SHELL -lc "
# curl 'https://git.pleroma.social/api/v4/projects/2/jobs/artifacts/stable/download?job=$FLAVOUR' -o /tmp/pleroma.zip
# unzip /tmp/pleroma.zip -d /tmp/
# "
- get_url:
url: https://git.pleroma.social/api/v4/projects/2/jobs/artifacts/stable/download?job={{ pleroma_flavour }}
dest: /tmp/pleroma.zip
- command: unzip /tmp/pleroma.zip -d /tmp/
# Move the release to the home directory and delete temporary files
# su pleroma -s $SHELL -lc "
# mv /tmp/release/* /opt/pleroma
# rmdir /tmp/release
# rm /tmp/pleroma.zip
# "
- copy:
src: /tmp/release/
dest: /opt/pleroma/
remote_src: true
owner: pleroma
- file:
path: "{{ item }}"
state: absent
loop:
- /tmp/release
- /tmp/pleroma.zip
# Create uploads directory and set proper permissions (skip if planning to use a remote uploader)
# Note: It does not have to be `/var/lib/pleroma/uploads`, the config generator will ask about the upload directory later
# mkdir -p /var/lib/pleroma/uploads
# chown -R pleroma /var/lib/pleroma
# Create custom public files directory (custom emojis, frontend bundle overrides, robots.txt, etc.)
# Note: It does not have to be `/var/lib/pleroma/static`, the config generator will ask about the custom public files directory later
# mkdir -p /var/lib/pleroma/static
# chown -R pleroma /var/lib/pleroma
# Create a config directory
# mkdir -p /etc/pleroma
# chown -R pleroma /etc/pleroma
- file:
path: "{{ item }}"
state: directory
owner: pleroma
loop:
- /var/lib/pleroma/uploads
- /var/lib/pleroma/static
- /etc/pleroma
handlers:
- name: Restart postgres
service:
name: postgresql
state: restarted

@ -0,0 +1,30 @@
# https://docs.pleroma.social/backend/installation/otp_en/
---
- hosts: ramble-hard
become: true
tasks:
# Create the postgres database
# su postgres -s $SHELL -lc "psql -f /tmp/setup_db.psql"
- command: psql -f /tmp/setup_db.psql
become_user: postgres
# Create the database schema
# su pleroma -s $SHELL -lc "./bin/pleroma_ctl migrate"
- command: ./bin/pleroma_ctl migrate
args:
chdir: /opt/pleroma
become_user: pleroma
# If you have installed RUM indexes uncomment and run
# su pleroma -s $SHELL -lc "./bin/pleroma_ctl migrate --migrations-path priv/repo/optional_migrations/rum_indexing/"
# - command: ./bin/pleroma_ctl migrate --migrations-path priv/repo/optional_migrations/rum_indexing/
# args:
# chdir: /opt/pleroma
# become_user: pleroma
handlers:
- name: Restart postgres
service:
name: postgresql
state: restarted

@ -0,0 +1,90 @@
# https://docs.pleroma.social/backend/installation/otp_en/
---
- hosts: ramble-hard
become: true
vars_files:
- ../vars.private
tasks:
- package:
name:
- certbot
- nginx
- service:
name: nginx
state: stopped
# certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
- command: >
certbot certonly --standalone --preferred-challenges http
-n --agree-tos -m {{ lets_encrypt.email }}
-d {{ pleroma.tld }}
- service:
name: nginx
state: started
# cp /opt/pleroma/installation/pleroma.nginx /etc/nginx/sites-available/pleroma.conf
# ln -s /etc/nginx/sites-available/pleroma.conf /etc/nginx/sites-enabled/pleroma.conf
- copy:
src: /opt/pleroma/installation/pleroma.nginx
dest: /etc/nginx/sites-available/pleroma.conf
remote_src: true
notify: Restart nginx
# TODO: https://mastodon.bawue.social/@ixs/109514849935951693
- file:
src: /etc/nginx/sites-available/pleroma.conf
dest: /etc/nginx/sites-enabled/pleroma.conf
state: link
notify: Restart nginx
- replace:
path: /etc/nginx/sites-available/pleroma.conf
regexp: 'example\.tld'
replace: "{{ pleroma.tld }}"
notify: Restart nginx
# Copy the service into a proper directory
# cp /opt/pleroma/installation/pleroma.service /etc/systemd/system/pleroma.service
- copy:
src: /opt/pleroma/installation/pleroma.service
dest: /etc/systemd/system/pleroma.service
remote_src: true
# Start pleroma and enable it on boot
# systemctl start pleroma
# systemctl enable pleroma
notify: Restart pleroma
# Create the directory for webroot challenges
# mkdir -p /var/lib/letsencrypt
- file:
path: /var/lib/letsencrypt
state: directory
# Add it to the daily cron
# echo '#!/bin/sh
# certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
# ' > /etc/cron.daily/renew-pleroma-cert
# chmod +x /etc/cron.daily/renew-pleroma-cert
- ansible.builtin.copy:
content: |
#!/bin/sh
certbot renew --cert-name {{ pleroma.tld }} --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
dest: /etc/cron.daily/renew-pleroma-cert
mode: +x
# - template:
# src: renew-pleroma-cert
# dest: /etc/cron.daily/renew-pleroma-cert
# mode: +x
handlers:
- name: Restart nginx
service:
name: nginx
state: restarted
- name: Restart pleroma
service:
name: pleroma
enabled: true
state: restarted

@ -0,0 +1,23 @@
```sh
ansible-playbook playbooks/pleroma/01.yml
su pleroma -s $SHELL -lc "./bin/pleroma_ctl instance gen --output /etc/pleroma/config.exs --output-psql /tmp/setup_db.psql"
ansible-playbook playbooks/pleroma/02.yml
# Start the instance to verify that everything is working as expected
su pleroma -s $SHELL -lc "./bin/pleroma daemon"
# Wait for about 20 seconds and query the instance endpoint, if it shows your
# uri, name and email correctly, you are configured correctly
sleep 20 && curl http://localhost:4000/api/v1/instance
# Stop the instance
su pleroma -s $SHELL -lc "./bin/pleroma stop"
ansible-playbook -l pleroma playbooks/pleroma/03.yml
cd /opt/pleroma
su pleroma -s $SHELL -lc "./bin/pleroma_ctl user new joeuser joeuser@sld.tld --admin"
su pleroma -s $SHELL -lc "./bin/pleroma_ctl config migrate_to_db"
```
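
The "query the instance endpoint" check above can also be expressed as an Ansible task; this is a hypothetical sketch (the play name and retry counts are made up, and the `uri`/`title`/`email` fields assume Pleroma's Mastodon-compatible `/api/v1/instance` response):

```yaml
# Hypothetical sketch: automate the manual curl verification step above.
- name: Verify the Pleroma instance is up
  hosts: ramble-hard
  tasks:
    - name: Query the instance endpoint
      ansible.builtin.uri:
        url: http://localhost:4000/api/v1/instance
        return_content: true
      register: instance
      retries: 5        # give the daemon time to boot
      delay: 5
      until: instance.status == 200
    - name: Show uri, title, and email for a quick eyeball check
      ansible.builtin.debug:
        msg: "{{ instance.json.uri }} / {{ instance.json.title }} / {{ instance.json.email }}"
```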

@ -0,0 +1,29 @@
# https://tailscale.com/download/linux/debian-bullseye
- name: Install Tailscale
hosts: ramble-hard
become: true
tasks:
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg | sudo tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
- name: Download Tailscale package signing key
ansible.builtin.get_url:
url: https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg
dest: /usr/share/keyrings/tailscale-archive-keyring.gpg
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.tailscale-keyring.list | sudo tee /etc/apt/sources.list.d/tailscale.list
- name: Add Tailscale repository
ansible.builtin.apt_repository:
repo: deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/debian bullseye main
state: present
# sudo apt-get update
- name: Update apt-get
ansible.builtin.apt:
update_cache: true
# sudo apt-get install tailscale
- name: Install Tailscale
ansible.builtin.package:
name: tailscale
state: present