Compare commits

...

No commits in common. "47b35143503b8956f0f388a0f1e6d8d9947b52a6" and "b8079666bd89cffd63b9ac0a0acca144106d4fe5" have entirely different histories.

41 changed files with 2330 additions and 11 deletions

56
.env.example Normal file
View File

@@ -0,0 +1,56 @@
COMPOSE_PROFILES=
COMPOSE_PATH_SEPARATOR=:
COMPOSE_FILE=docker-compose.yml:adguardhome/docker-compose.yml:tandoor/docker-compose.yml:joplin/docker-compose.yml:homeassistant/docker-compose.yml:immich/docker-compose.yml
USER_ID=1000
GROUP_ID=1000
TIMEZONE="America/New_York"
CONFIG_ROOT="."
DATA_ROOT="/mnt/data"
DOWNLOAD_ROOT="/mnt/data/torrents"
IMMICH_UPLOAD_LOCATION="/mnt/data/photos"
PIA_LOCATION=ca
PIA_USER=
PIA_PASS=
PIA_LOCAL_NETWORK="192.168.0.0/16"
HOSTNAME=localhost
HOMEASSISTANT_HOSTNAME=
IMMICH_HOSTNAME=
ADGUARD_HOSTNAME=
ADGUARD_USERNAME=
ADGUARD_PASSWORD=
QBITTORRENT_USERNAME=admin
QBITTORRENT_PASSWORD=adminadmin
DNS_CHALLENGE=true
DNS_CHALLENGE_PROVIDER=cloudflare
LETS_ENCRYPT_CA_SERVER="https://acme-v02.api.letsencrypt.org/directory"
LETS_ENCRYPT_EMAIL=
CLOUDFLARE_EMAIL=
CLOUDFLARE_DNS_API_TOKEN=
CLOUDFLARE_ZONE_API_TOKEN=
SONARR_API_KEY=
RADARR_API_KEY=
LIDARR_API_KEY=
PROWLARR_API_KEY=
BAZARR_API_KEY=
JELLYFIN_API_KEY=
JELLYSEERR_API_KEY=
SABNZBD_API_KEY=
IMMICH_API_KEY=
HOMEASSISTANT_ACCESS_TOKEN=
HOMEPAGE_VAR_TITLE="Docker-Compose NAS"
HOMEPAGE_VAR_SEARCH_PROVIDER=google
HOMEPAGE_VAR_HEADER_STYLE=boxed
HOMEPAGE_VAR_WEATHER_CITY=
HOMEPAGE_VAR_WEATHER_LAT=
HOMEPAGE_VAR_WEATHER_LONG=
HOMEPAGE_VAR_WEATHER_UNIT=metric
IMMICH_DB_PASSWORD=postgres
CALIBRE_USERNAME=admin
CALIBRE_PASSWORD=admin123
DECLUTTARR_TEST_RUN=True
DECLUTTARR_REMOVE_TIMER=60
DECLUTTARR_REMOVE_FAILED=True
DECLUTTARR_REMOVE_FAILED_IMPORTS=True
DECLUTTARR_REMOVE_METADATA_MISSING=True
DECLUTTARR_REMOVE_MISSING_FILES=True
DECLUTTARR_REMOVE_ORPHANS=True

1
.gitattributes vendored Normal file
View File

@@ -0,0 +1 @@
*.html linguist-detectable=false

1
.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1 @@
github: AdrienPoupa

13
.github/workflows/main.yml vendored Normal file
View File

@@ -0,0 +1,13 @@
on: push
jobs:
validate-docker-compose:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Docker Compose
uses: ndeloof/install-compose-action@v0.0.1
- name: Setup Environment Variables
run: cp .env.example .env
- name: Validate Docker Compose
run: docker-compose config

22
.gitignore vendored Normal file
View File

@@ -0,0 +1,22 @@
*.env
.idea
docker-compose.override.yml
/homepage/logs
/homepage/*.yaml
/homepage/*.css
/homepage/*.js
/sonarr
/radarr
/prowlarr
/lidarr
/bazarr
/qbittorrent
/pia
/pia-shared
/letsencrypt
/jellyseerr
/adguardhome/certs
/adguardhome/conf
/adguardhome/work
/sabnzbd
/calibre-web

View File

@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2025 aki
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

587
README.md
View File

@@ -1,3 +1,586 @@
# docker-compose-nas
# Docker Compose NAS
Simple Docker Compose NAS featuring Sonarr, Radarr, Prowlarr, Jellyfin, qBittorrent, PIA VPN and Traefik with SSL support
After searching for the perfect NAS solution, I realized what I wanted could be achieved
with some Docker containers on a vanilla Linux box. The result is an opinionated Docker Compose configuration capable of
browsing indexers to retrieve media resources and downloading them through a WireGuard VPN with port forwarding.
SSL certificates and remote access through Tailscale are supported.
Requirements: Any Docker-capable recent Linux box with Docker Engine and Docker Compose V2.
I am running it in Ubuntu Server 22.04; I also tested this setup on a [Synology DS220+ with DSM 7.1](#synology-quirks).
![Docker-Compose NAS Homepage](https://github.com/AdrienPoupa/docker-compose-nas/assets/15086425/3492a9f6-3779-49a5-b052-4193844f16f0)
## Table of Contents
<!-- TOC -->
* [Docker Compose NAS](#docker-compose-nas)
* [Table of Contents](#table-of-contents)
* [Applications](#applications)
* [Quick Start](#quick-start)
* [Environment Variables](#environment-variables)
* [PIA WireGuard VPN](#pia-wireguard-vpn)
* [Sonarr, Radarr & Lidarr](#sonarr-radarr--lidarr)
* [File Structure](#file-structure)
* [Download Client](#download-client)
* [Prowlarr](#prowlarr)
* [qBittorrent](#qbittorrent)
* [Jellyfin](#jellyfin)
* [Homepage](#homepage)
* [Jellyseerr](#jellyseerr)
* [Traefik and SSL Certificates](#traefik-and-ssl-certificates)
* [Accessing from the outside with Tailscale](#accessing-from-the-outside-with-tailscale)
* [Optional Services](#optional-services)
* [FlareSolverr](#flaresolverr)
* [SABnzbd](#sabnzbd)
* [AdGuard Home](#adguard-home)
* [Encryption](#encryption)
* [DHCP](#dhcp)
* [Expose DNS Server with Tailscale](#expose-dns-server-with-tailscale)
* [Calibre-Web](#calibre-web)
* [Decluttarr](#decluttarr)
* [Tandoor](#tandoor)
* [Joplin](#joplin)
* [Home Assistant](#home-assistant)
* [Immich](#immich)
* [Customization](#customization)
* [Optional: Using the VPN for *arr apps](#optional-using-the-vpn-for-arr-apps)
* [Synology Quirks](#synology-quirks)
* [Free Ports 80 and 443](#free-ports-80-and-443)
* [Install Synology WireGuard](#install-synology-wireguard)
* [Free Port 1900](#free-port-1900)
* [User Permissions](#user-permissions)
* [Synology DHCP Server and Adguard Home Port Conflict](#synology-dhcp-server-and-adguard-home-port-conflict)
* [Use Separate Paths for Torrents and Storage](#use-separate-paths-for-torrents-and-storage)
* [NFS Share](#nfs-share)
* [Static IP](#static-ip)
* [Laptop Specific Configuration](#laptop-specific-configuration)
<!-- TOC -->
## Applications
| **Application** | **Description** | **Image** | **URL** |
|--------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|--------------|
| [Sonarr](https://sonarr.tv) | PVR for newsgroup and bittorrent users | [linuxserver/sonarr](https://hub.docker.com/r/linuxserver/sonarr) | /sonarr |
| [Radarr](https://radarr.video) | Movie collection manager for Usenet and BitTorrent users | [linuxserver/radarr](https://hub.docker.com/r/linuxserver/radarr) | /radarr |
| [Bazarr](https://www.bazarr.media/) | Companion application to Sonarr and Radarr that manages and downloads subtitles | [linuxserver/bazarr](https://hub.docker.com/r/linuxserver/bazarr) | /bazarr |
| [Prowlarr](https://github.com/Prowlarr/Prowlarr) | Indexer aggregator for Sonarr and Radarr | [linuxserver/prowlarr:latest](https://hub.docker.com/r/linuxserver/prowlarr) | /prowlarr |
| [PIA WireGuard VPN](https://github.com/thrnz/docker-wireguard-pia) | Encapsulate qBittorrent traffic in [PIA](https://www.privateinternetaccess.com/) using [WireGuard](https://www.wireguard.com/) with port forwarding. | [thrnz/docker-wireguard-pia](https://hub.docker.com/r/thrnz/docker-wireguard-pia) | |
| [qBittorrent](https://www.qbittorrent.org) | Bittorrent client with a complete web UI<br/>Uses VPN network<br/>Using Libtorrent 1.x | [linuxserver/qbittorrent:libtorrentv1](https://hub.docker.com/r/linuxserver/qbittorrent) | /qbittorrent |
| [Unpackerr](https://unpackerr.zip) | Automated Archive Extractions | [golift/unpackerr](https://hub.docker.com/r/golift/unpackerr) | |
| [Jellyfin](https://jellyfin.org) | Media server designed to organize, manage, and share digital media files to networked devices | [linuxserver/jellyfin](https://hub.docker.com/r/linuxserver/jellyfin) | /jellyfin |
| [Jellyseerr](https://github.com/Fallenbagel/jellyseerr)            | Manages requests for your media library                                                                                                              | [fallenbagel/jellyseerr](https://hub.docker.com/r/fallenbagel/jellyseerr)                | /jellyseerr  |
| [Homepage](https://gethomepage.dev) | Application dashboard | [gethomepage/homepage](https://github.com/gethomepage/homepage/pkgs/container/homepage) | / |
| [Traefik](https://traefik.io) | Reverse proxy | [traefik](https://hub.docker.com/_/traefik) | |
| [Watchtower](https://containrrr.dev/watchtower/) | Automated Docker images update | [containrrr/watchtower](https://hub.docker.com/r/containrrr/watchtower) | |
| [Autoheal](https://github.com/willfarrell/docker-autoheal/) | Monitor and restart unhealthy Docker containers | [willfarrell/autoheal](https://hub.docker.com/r/willfarrell/autoheal) | |
| [Lidarr](https://lidarr.audio) | Optional - Music collection manager for Usenet and BitTorrent users<br/>Enable with `COMPOSE_PROFILES=lidarr` | [linuxserver/lidarr](https://hub.docker.com/r/linuxserver/lidarr) | /lidarr |
| [SABnzbd](https://sabnzbd.org/) | Optional - Free and easy binary newsreader<br/>Enable with `COMPOSE_PROFILES=sabnzbd` | [linuxserver/sabnzbd](https://hub.docker.com/r/linuxserver/sabnzbd) | /sabnzbd |
| [FlareSolverr](https://github.com/FlareSolverr/FlareSolverr) | Optional - Proxy server to bypass Cloudflare protection in Prowlarr<br/>Enable with `COMPOSE_PROFILES=flaresolverr` | [flaresolverr/flaresolverr](https://hub.docker.com/r/flaresolverr/flaresolverr) | |
| [AdGuard Home](https://adguard.com/en/adguard-home/overview.html) | Optional - Network-wide software for blocking ads & tracking<br/>Enable with `COMPOSE_PROFILES=adguardhome` | [adguard/adguardhome](https://hub.docker.com/r/adguard/adguardhome) | |
| [Tandoor](https://tandoor.dev) | Optional - Smart recipe management<br/>Enable with `COMPOSE_PROFILES=tandoor` | [vabene1111/recipes](https://hub.docker.com/r/vabene1111/recipes) | /recipes |
| [Joplin](https://joplinapp.org) | Optional - Note taking application<br/>Enable with `COMPOSE_PROFILES=joplin` | [joplin/server](https://hub.docker.com/r/joplin/server) | /joplin |
| [Home Assistant](https://www.home-assistant.io) | Optional - Open source home automation that puts local control and privacy first<br/>Enable with `COMPOSE_PROFILES=homeassistant` | [home-assistant/home-assistant:stable](https://ghcr.io/home-assistant/home-assistant) | |
| [Immich](https://immich.app) | Optional - Self-hosted photo and video management solution<br/>Enable with `COMPOSE_PROFILES=immich` | [immich-app/immich-server:release](https://ghcr.io/immich-app/immich-server) | |
| [Calibre-Web](https://github.com/janeczku/calibre-web) | Optional - Web app for browsing, reading and downloading eBooks stored in a Calibre database<br/>Enable with `COMPOSE_PROFILES=calibre-web` | [linuxserver/calibre-web](https://hub.docker.com/r/linuxserver/calibre-web) | /calibre |
| [Decluttarr](https://github.com/ManiMatter/decluttarr) | Optional - Keeps the download queues free of stalled and redundant downloads. <br/>Enable with `COMPOSE_PROFILES=decluttarr` | [manimatter/decluttarr:latest](https://ghcr.io/manimatter/decluttarr:latest) | |
Optional containers are not enabled by default, they need to be enabled,
see [Optional Services](#optional-services) for more information.
## Quick Start
`cp .env.example .env`, edit to your needs then `docker compose up -d`.
For the first time, run `./update-config.sh` to update the applications base URLs and set the API keys in `.env`.
If you want to show Jellyfin information in the homepage, create it in Jellyfin settings and fill `JELLYFIN_API_KEY`.
## Environment Variables
| Variable | Description | Default |
|--------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
| `COMPOSE_FILE` | Docker compose files to load | |
| `COMPOSE_PROFILES`             | Docker compose profiles to load (`flaresolverr`, `adguardhome`, `adguardhome-dhcp`, `sabnzbd`, `lidarr`, `calibre-web`, `decluttarr`, `tandoor`, `joplin`, `homeassistant`, `immich`)                                                                    |                                                  |
| `USER_ID` | ID of the user to use in Docker containers | `1000` |
| `GROUP_ID` | ID of the user group to use in Docker containers | `1000` |
| `TIMEZONE` | TimeZone used by the container. | `America/New_York` |
| `CONFIG_ROOT` | Host location for configuration files | `.` |
| `DATA_ROOT` | Host location of the data files | `/mnt/data` |
| `DOWNLOAD_ROOT` | Host download location for qBittorrent, should be a subfolder of `DATA_ROOT` | `/mnt/data/torrents` |
| `PIA_LOCATION` | Servers to use for PIA. [see list here](https://serverlist.piaservers.net/vpninfo/servers/v6) | `ca` (Montreal, Canada) |
| `PIA_USER` | PIA username | |
| `PIA_PASS` | PIA password | |
| `PIA_LOCAL_NETWORK` | PIA local network | `192.168.0.0/16` |
| `HOSTNAME` | Hostname of the NAS, could be a local IP or a domain name | `localhost` |
| `ADGUARD_HOSTNAME` | Optional - AdGuard Home hostname used, if enabled | |
| `ADGUARD_USERNAME` | Optional - AdGuard Home username to show details in the homepage, if enabled | |
| `ADGUARD_PASSWORD` | Optional - AdGuard Home password to show details in the homepage, if enabled | |
| `QBITTORRENT_USERNAME` | qBittorrent username to access the web UI | `admin` |
| `QBITTORRENT_PASSWORD` | qBittorrent password to access the web UI | `adminadmin` |
| `DNS_CHALLENGE` | Enable/Disable DNS01 challenge, set to `false` to disable. | `true` |
| `DNS_CHALLENGE_PROVIDER` | Provider for DNS01 challenge, [see list here](https://doc.traefik.io/traefik/https/acme/#providers). | `cloudflare` |
| `LETS_ENCRYPT_CA_SERVER` | Let's Encrypt CA Server used to generate certificates, set to production by default.<br/>Set to `https://acme-staging-v02.api.letsencrypt.org/directory` to test your changes with the staging server. | `https://acme-v02.api.letsencrypt.org/directory` |
| `LETS_ENCRYPT_EMAIL` | E-mail address used to send expiration notifications | |
| `CLOUDFLARE_EMAIL` | CloudFlare Account email | |
| `CLOUDFLARE_DNS_API_TOKEN` | API token with `DNS:Edit` permission | |
| `CLOUDFLARE_ZONE_API_TOKEN` | API token with `Zone:Read` permission | |
| `SONARR_API_KEY` | Sonarr API key to show information in the homepage | |
| `RADARR_API_KEY` | Radarr API key to show information in the homepage | |
| `LIDARR_API_KEY` | Lidarr API key to show information in the homepage | |
| `PROWLARR_API_KEY` | Prowlarr API key to show information in the homepage | |
| `BAZARR_API_KEY` | Bazarr API key to show information in the homepage | |
| `JELLYFIN_API_KEY` | Jellyfin API key to show information in the homepage | |
| `JELLYSEERR_API_KEY`           | Jellyseerr API key to show information in the homepage                                                                                                                                                 |                                                  |
| `SABNZBD_API_KEY` | Sabnzbd API key to show information in the homepage | |
| `HOMEPAGE_VAR_TITLE` | Title of the homepage | `Docker-Compose NAS` |
| `HOMEPAGE_VAR_SEARCH_PROVIDER` | Homepage search provider, [see list here](https://gethomepage.dev/en/widgets/search/) | `google` |
| `HOMEPAGE_VAR_HEADER_STYLE` | Homepage header style, [see list here](https://gethomepage.dev/en/configs/settings/#header-style) | `boxed` |
| `HOMEPAGE_VAR_WEATHER_CITY` | Homepage weather city name | |
| `HOMEPAGE_VAR_WEATHER_LAT` | Homepage weather city latitude | |
| `HOMEPAGE_VAR_WEATHER_LONG` | Homepage weather city longitude | |
| `HOMEPAGE_VAR_WEATHER_UNIT` | Homepage weather unit, either `metric` or `imperial` | `metric` |
| `CALIBRE_USERNAME` | Optional - Calibre-Web username to show details in the homepage, if enabled | `admin` |
| `CALIBRE_PASSWORD` | Optional - Calibre-Web password to show details in the homepage, if enabled | `admin123` |
## PIA WireGuard VPN
I chose PIA since it supports WireGuard and [port forwarding](https://github.com/thrnz/docker-wireguard-pia/issues/26#issuecomment-868165281),
but you could use other providers:
- OpenVPN: [linuxserver/openvpn-as](https://hub.docker.com/r/linuxserver/openvpn-as)
- WireGuard: [linuxserver/wireguard](https://hub.docker.com/r/linuxserver/wireguard)
- NordVPN + OpenVPN: [bubuntux/nordvpn](https://hub.docker.com/r/bubuntux/nordvpn/dockerfile)
- NordVPN + WireGuard (NordLynx): [bubuntux/nordlynx](https://hub.docker.com/r/bubuntux/nordlynx)
For PIA + WireGuard, fill `.env` with your PIA credentials.
The location of the server it will connect to is set by `PIA_LOCATION=ca`, defaulting to Montreal - Canada.
You need to fill the credentials in the `PIA_*` environment variable,
otherwise the VPN container will exit and qBittorrent will not start.
## Sonarr, Radarr & Lidarr
### File Structure
Sonarr, Radarr, and Lidarr must be configured to support hardlinks, to allow instant moves and prevent using twice the storage
(Bittorrent downloads and final file). The trick is to use a single volume shared by the Bittorrent client and the *arrs.
Subfolders are used to separate the TV shows from the movies.
The configuration is well explained by [this guide](https://trash-guides.info/Hardlinks/How-to-setup-for/Docker/).
In summary, the final structure of the shared volume will be as follows:
```
data
├── torrents = shared folder qBittorrent downloads
│ ├── movies = movies downloads tagged by Radarr
│ └── tv = TV shows downloads tagged by Sonarr
└── media = shared folder for Sonarr and Radarr files
├── movies = Radarr
└── tv = Sonarr
└── music = Lidarr
```
Go to Settings > Management.
In Sonarr, set the Root folder to `/data/media/tv`.
In Radarr, set the Root folder to `/data/media/movies`.
In Lidarr, set the Root folder to `/data/media/music`.
### Download Client
Then qBittorrent can be configured at Settings > Download Clients. Because all the networking for qBittorrent takes
place in the VPN container, the hostname for qBittorrent is the hostname of the VPN container, ie `vpn`, and the port is `8080`:
## Prowlarr
The indexers are configured through Prowlarr. They synchronize automatically to Radarr and Sonarr.
Radarr and Sonarr may then be added via Settings > Apps. The Prowlarr server is `http://prowlarr:9696/prowlarr`, the Radarr server
is `http://radarr:7878/radarr` Sonarr `http://sonarr:8989/sonarr`, and Lidarr `http://lidarr:8686/lidarr`.
Their API keys can be found in Settings > Security > API Key.
## qBittorrent
Running `update-config.sh` will set qBittorrent's password to `adminadmin`. If you wish to update the password manually,
since qBittorrent v4.6.2, a temporary password is generated on startup. Get it with `docker compose logs qbittorrent`:
```
The WebUI administrator username is: admin
The WebUI administrator password was not set. A temporary password is provided for this session: <some_password>
```
Use this password to access the UI, then go to Settings > Web UI and set your own password,
then set it in `.env`'s `QBITTORRENT_PASSWORD` variable.
The login page can be disabled for the local network by enabling `Bypass authentication for clients` in these subnets:
```
192.168.0.0/16
127.0.0.0/8
172.17.0.0/16
```
Set the default save path to `/data/torrents` in Settings, and restrict the network interface to WireGuard (`wg0`).
To use the VueTorrent WebUI just go to `qBittorrent`, `Options`, `Web UI`, `Use Alternative WebUI`, and enter `/vuetorrent`. Special thanks to gabe565 for the easy enablement with [linuxserver-mod-vuetorrent](https://github.com/gabe565/linuxserver-mod-vuetorrent).
## Jellyfin
To enable [hardware transcoding](https://jellyfin.org/docs/general/administration/hardware-acceleration/),
depending on your system, you may need to add the following block:
```
devices:
- /dev/dri/renderD128:/dev/dri/renderD128
- /dev/dri/card0:/dev/dri/card0
```
Generally, running Docker on Linux you will want to use VA-API, but the exact mount paths may differ depending on your
hardware.
## Homepage
The homepage comes with sensible defaults; some settings can be controlled via environment variables in `.env`.
If you want to customize further, you can modify the files in `/homepage/*.yaml` according to the [documentation](https://gethomepage.dev).
Due to how the Docker socket is configured for the Docker integration, files must be edited as root.
The files in `/homepage/tpl/*.yaml` only serve as a base to set up the homepage configuration on first run.
## Jellyseerr
Jellyseerr gives you content recommendations, allows others to make requests to you, and allows logging in with Jellyfin credentials.
To set up, go to https://hostname/jellyseerr/setup, and set the URLs as follows:
- Jellyfin: http://jellyfin:8096/jellyfin
- Radarr:
- Hostname: radarr
- Port: 7878
- URL Base: /radarr
- Sonarr
- Hostname: sonarr
- Port: 8989
- URL Base: /sonarr
## Traefik and SSL Certificates
While you can use the private IP to access your NAS, how cool would it be for it to be accessible through a subdomain
with a valid SSL certificate?
Traefik makes this trivial by using Let's Encrypt and one of its
[supported ACME challenge providers](https://doc.traefik.io/traefik/https/acme).
Let's assume we are using `nas.domain.com` as custom subdomain.
The idea is to create an A record pointing to the private IP of the NAS, `192.168.0.10` for example:
```
nas.domain.com. 1 IN A 192.168.0.10
```
The record will be publicly exposed but not resolve given this is a private IP.
Given the NAS is not accessible from the internet, we need to do a dnsChallenge.
Here we will be using CloudFlare, but the mechanism will be the same for all DNS providers
barring environment variable changes, see the Traefik documentation above and [Lego's documentation](https://go-acme.github.io/lego/dns).
Then, fill the CloudFlare `.env` entries.
If you want to test your configuration first, use the Let's Encrypt staging server by updating `LETS_ENCRYPT_CA_SERVER`'s
value in `.env`:
```
LETS_ENCRYPT_CA_SERVER=https://acme-staging-v02.api.letsencrypt.org/directory
```
If it worked, you will see the staging certificate at https://nas.domain.com.
You may remove the `./letsencrypt/acme.json` file and restart the services to issue the real certificate.
You are free to use any DNS01 provider. Simply replace `DNS_CHALLENGE_PROVIDER` with your own provider,
[see complete list here](https://doc.traefik.io/traefik/https/acme/#providers).
You will also need to inject the environment variables specific to your provider.
Certificate generation can be disabled by setting `DNS_CHALLENGE` to `false`.
### Accessing from the outside with Tailscale
If we want to make it reachable from outside the network without opening ports or exposing it to the internet, I found
[Tailscale](https://tailscale.com) to be a great solution: create a network, run the client on both the NAS and the device
you are connecting from, and they will see each other.
In this case, the A record should point to the IP Tailscale assigned to the NAS, eg `100.xxx.xxx.xxx`:
```
nas.domain.com. 1 IN A 100.xxx.xxx.xxx
```
See [here](https://tailscale.com/kb/installation) for installation instructions.
However, this means you will always need to be connected to Tailscale to access your NAS, even locally.
This can be remedied by overriding the DNS entry for the NAS domain like `192.168.0.10 nas.domain.com`
in your local DNS resolver such as Pi-Hole.
This way, when connected to the local network, the NAS is accessible directly from the private IP,
and from the outside you need to connect to Tailscale first, then the NAS domain will be accessible.
## Optional Services
Optional services are not launched by default and enabled by appending their profile name to the
`COMPOSE_PROFILES` environment variable (see [Docker documentation](https://docs.docker.com/compose/profiles)).
Say you want to enable FlareSolverr, you should have `COMPOSE_PROFILES=flaresolverr`.
Multiple optional services can be enabled separated by commas: `COMPOSE_PROFILES=flaresolverr,adguardhome`.
### FlareSolverr
In Prowlarr, add the FlareSolverr indexer with the URL http://flaresolverr:8191/
### SABnzbd
Enable SABnzbd by setting `COMPOSE_PROFILES=sabnzbd`. It will be accessible at `/sabnzbd`.
If that is not the case, the `url_base` parameter in `sabnzbd.ini` should be set to `/sabnzbd`.
Additionally, `host_whitelist` value should be set to your hostname.
### AdGuard Home
Enable AdGuard Home by setting `COMPOSE_PROFILES=adguardhome`.
Set the `ADGUARD_HOSTNAME`, I chose a different subdomain to use secure DNS without the folder.
On first run, specify the port 3000 and enable listen on all interfaces to make it work with Tailscale.
If after running `docker compose up -d`, you're getting `network docker-compose-nas declared as external, but could not be found`,
run `docker network create docker-compose-nas` first.
#### Encryption
In Settings > Encryption Settings, set the certificates path to `/opt/adguardhome/certs/certs/<YOUR_HOSTNAME>.crt`
and the private key to `/opt/adguardhome/certs/private/<YOUR_HOSTNAME>.key`, those files are created by Traefik cert dumper
from the ACME certificates Traefik generates in JSON.
#### DHCP
If you want to use the AdGuard Home DHCP server, for example because your router does not allow changing its DNS server,
you will need to select the `eth0` DHCP interface matching `10.0.0.10`, then specify the
Gateway IP to match your router address (`192.168.0.1` for example) and set a range of IP addresses assigned to local
devices.
In `adguardhome/docker-compose.yml`, set the network interface `dhcp-relay` should listen to. By default, it is set to
`enp2s0`, but you may need to change it to your host's network interface, verify it with `ip a`.
In the configuration (`adguardhome/conf/AdGuardHome.yaml`), set the DHCP options 6th key to your NAS internal IP address:
```yml
dhcp:
dhcpv4:
options:
- 6 ips 192.168.0.10,192.168.0.10
```
Enable DHCP Relay by setting `COMPOSE_PROFILES=adguardhome-dhcp`.
#### Expose DNS Server with Tailscale
Based on [Tailscale's documentation](https://tailscale.com/kb/1114/pi-hole), it is easy to use your AdGuard server everywhere.
Just make sure that AdGuard Home listens to all interfaces.
### Calibre-Web
If you do not have a Calibre database, download a sample from: https://github.com/janeczku/calibre-web/raw/master/library/metadata.db
and place it in `${DATA_ROOT}/books`.
On the initial setup screen, enter `/books` as your calibre library location.
**Default admin login:** Username: `admin` Password: `admin123`.
Unrar is included by default and needs to be set in the Calibre-Web admin page (Basic Configuration:External Binaries)
with a path of `/usr/bin/unrar`.
### Decluttarr
Decluttarr keeps the queue free of stalled and redundant downloads. For configuration options and examples,
please see https://github.com/ManiMatter/decluttarr/blob/dev/README.md.
All environment variables are prefixed with `DECLUTTARR_`.
### Tandoor
See [here](./tandoor/README.md).
### Joplin
See [here](./joplin/README.md).
### Home Assistant
See [here](./homeassistant/README.md).
### Immich
See [here](./immich/README.md).
## Customization
You can override the configuration of a service or add new services by creating a new `docker-compose.override.yml` file,
then appending it to the `COMPOSE_FILE` environment variable: `COMPOSE_FILE=docker-compose.yml:docker-compose.override.yml`
[See official documentation](https://docs.docker.com/compose/extends).
For example, use a [different VPN provider](https://github.com/bubuntux/nordvpn):
```yml
services:
vpn:
image: ghcr.io/bubuntux/nordvpn
cap_add:
- NET_ADMIN # Required
- NET_RAW # Required
environment: # Review https://github.com/bubuntux/nordvpn#environment-variables
- USER=user@email.com # Required
- "PASS=pas$word" # Required
- CONNECT=United_States
- TECHNOLOGY=NordLynx
- NETWORK=192.168.1.0/24 # So it can be accessed within the local network
```
### Optional: Using the VPN for *arr apps
If you want to use the VPN for Prowlarr and other *arr applications, add the following block to all the desired containers:
```yml
network_mode: "service:vpn"
depends_on:
vpn:
condition: service_healthy
```
Change the healthcheck to mark the containers as unhealthy when internet connection is not working by appending a URL
to the healthcheck, eg: `test: [ "CMD", "curl", "--fail", "http://127.0.0.1:7878/radarr/ping", "https://google.com" ]`
Then in Prowlarr, use `localhost` rather than `vpn` as the hostname, since they are on the same network.
## Synology Quirks
Docker compose NAS can run on DSM 7.1, with a few extra steps.
### Free Ports 80 and 443
By default, ports 80 and 443 are used by Nginx but not actually used for anything useful. Free them by creating a new task
in the Task Scheduler > Create > Triggered Task > User-defined script. Leave the Event as `Boot-up` and the `root` user,
go to Task Settings and paste the following in User-defined script:
```
sed -i -e 's/80/81/' -e 's/443/444/' /usr/syno/share/nginx/server.mustache /usr/syno/share/nginx/DSM.mustache /usr/syno/share/nginx/WWWService.mustache
synosystemctl restart nginx
```
### Install Synology WireGuard
Since WireGuard is not part of DSM's kernel, an external package must be installed for the `vpn` container to run.
For DSM 7.1, download and install the package corresponding to your NAS CPU architecture
[from here](https://github.com/vegardit/synology-wireguard/releases).
As specified in the [project's README](https://github.com/vegardit/synology-wireguard#installation),
the package must be run as `root` from the command line: `sudo /var/packages/WireGuard/scripts/start`
### Free Port 1900
Jellyfin will fail to run by default since the port 1900
[is not free](https://lookanotherblog.com/resolve-port-1900-conflict-between-plex-and-synology/).
You may free it by going to Control Panel > File Services > Advanced > SSTP > Untick `Enable Windows network discovery`.
### User Permissions
By default, the user and groups are set to `1000` as it is the default on Ubuntu and many other Linux distributions.
However, that is not the case in Synology; the first user should have an ID of `1026` and a group of `100`.
You may check yours with `id`.
Update the `USER_ID` and `GROUP_ID` in `.env` with your IDs.
Not updating them may result in [permission issues](https://github.com/AdrienPoupa/docker-compose-nas/issues/10).
```
USER_ID=1026
GROUP_ID=100
```
### Synology DHCP Server and Adguard Home Port Conflict
If you are using the Synology DHCP Server package, it will use port 53 even if it does not need it. This is because
it uses Dnsmasq to handle DHCP requests, but does not serve DNS queries. The port can be released by editing (as root)
`/usr/local/lib/systemd/system/pkg-dhcpserver.service` and [adding -p 0](https://www.reddit.com/r/synology/comments/njwdao/comment/j2d23qr/?utm_source=reddit&utm_medium=web2x&context=3):
`ExecStart=/var/packages/DhcpServer/target/dnsmasq-2.x/usr/bin/dnsmasq --user=DhcpServer --group=DhcpServer --cache-size=200 --conf-file=/etc/dhcpd/dhcpd.conf --dhcp-lease-max=2147483648 -p 0`
Reboot the NAS and the port 53 will be free for Adguard.
## Use Separate Paths for Torrents and Storage
If you want to use separate paths for torrents download and long term storage, to use different disks for example,
set your `docker-compose.override.yml` to:
```yml
services:
sonarr:
volumes:
- ./sonarr:/config
- ${DATA_ROOT}/media/tv:/data/media/tv
- ${DOWNLOAD_ROOT}/tv:/data/torrents/tv
radarr:
volumes:
- ./radarr:/config
- ${DATA_ROOT}/media/movies:/data/media/movies
- ${DOWNLOAD_ROOT}/movies:/data/torrents/movies
```
Note you will lose the hard link ability, ie your files will be duplicated.
In Sonarr and Radarr, go to `Settings` > `Importing` > Untick `Use Hardlinks instead of Copy`
## NFS Share
This can be useful to share the media folder to a local player like Kodi or computers in the local network,
but may not be necessary if Jellyfin is going to be used to access the media.
Install the NFS kernel server: `sudo apt install nfs-kernel-server`
Then edit `/etc/exports` to configure your shares:
`/mnt/data/media 192.168.0.0/255.255.255.0(rw,all_squash,nohide,no_subtree_check,anonuid=1000,anongid=1000)`
This will share the `media` folder to anybody on your local network (192.168.0.x).
I purposely left out the `sync` flag that would slow down file transfer.
On [some devices](https://forum.kodi.tv/showthread.php?tid=343434) you may need to use the `insecure`
option for the share to be available.
Restart the NFS server to apply the changes: `sudo /etc/init.d/nfs-kernel-server restart`
On other machines, you can see the shared folder by adding the following to your `/etc/fstab`:
`192.168.0.10:/mnt/data/media /mnt/nas nfs ro,hard,intr,auto,_netdev 0 0`
## Static IP
Set a static IP, assuming `192.168.0.10` and using Google DNS servers: `sudo nano /etc/netplan/00-installer-config.yaml`
```yaml
# This is the network config written by 'subiquity'
network:
ethernets:
enp2s0:
dhcp4: no
addresses:
- 192.168.0.10/24
gateway4: 192.168.0.1
nameservers:
addresses: [8.8.8.8, 8.8.4.4]
version: 2
```
Apply the plan: `sudo netplan apply`. You can check the server uses the right IP with `ip a`.
## Laptop Specific Configuration
If the server is installed on a laptop, you may want to disable the suspension when the lid is closed:
`sudo nano /etc/systemd/logind.conf`
Replace:
- `#HandleLidSwitch=suspend` by `HandleLidSwitch=ignore`
- `#LidSwitchIgnoreInhibited=yes` by `LidSwitchIgnoreInhibited=no`
Then restart: `sudo service systemd-logind restart`

View File

@ -0,0 +1,84 @@
services:
dhcp-relay:
image: modem7/dhcprelay:latest
container_name: dhcp-relay
restart: always
depends_on:
- adguardhome
command: ["-id", "enp2s0", "-iu", "br_adguard", "10.0.0.10"]
cap_add:
- NET_ADMIN
network_mode: host
mem_reservation: 6m
profiles:
- adguardhome-dhcp
adguardhome:
image: adguard/adguardhome
container_name: adguardhome
restart: always
healthcheck:
test: ["CMD", "wget", "http://127.0.0.1:3000", "-qO", "/dev/null"]
interval: 30s
retries: 10
networks:
adguardhome:
ipv4_address: 10.0.0.10
default:
ports:
- "53:53/tcp"
- "53:53/udp"
- "68:68/tcp"
- "68:68/udp"
- "853:853/tcp"
volumes:
- ./adguardhome/work:/opt/adguardhome/work
- ./adguardhome/conf:/opt/adguardhome/conf
- ./adguardhome/certs:/opt/adguardhome/certs
labels:
- "traefik.enable=true"
- "traefik.docker.network=docker-compose-nas"
- "traefik.http.services.adguardhome.loadbalancer.server.port=3000"
- "traefik.http.routers.adguardhome.rule=(Host(`${ADGUARD_HOSTNAME}`))"
- "traefik.http.routers.adguardhome.tls=true"
- "traefik.http.routers.adguardhome.tls.certresolver=myresolver"
- homepage.group=Utilities
- homepage.name=Adguard
- homepage.icon=adguard-home.png
- homepage.href=https://${ADGUARD_HOSTNAME}
- homepage.description=DNS Adblocker
- homepage.weight=0
- homepage.widget.type=adguard
- homepage.widget.url=https://${ADGUARD_HOSTNAME}
- homepage.widget.username=${ADGUARD_USERNAME}
- homepage.widget.password=${ADGUARD_PASSWORD}
profiles:
- adguardhome
traefik-certs-dumper:
image: ghcr.io/ldez/traefik-certs-dumper:latest
container_name: traefik-certs-dumper
restart: always
entrypoint: sh -c '
apk add jq
; while ! [ -e /data/acme.json ]
|| ! [ `jq ".[] | .Certificates | length" /data/acme.json` != 0 ]; do
sleep 1
; done
&& traefik-certs-dumper file --version v2 --watch
--clean false
--source /data/acme.json --dest /certs'
volumes:
- ./letsencrypt:/data
- ./adguardhome/certs:/certs
profiles:
- adguardhome
networks:
adguardhome:
driver: bridge
driver_opts:
com.docker.network.bridge.name: br_adguard
ipam:
config:
- subnet: 10.0.0.0/24

562
docker-compose.yml Normal file
View File

@ -0,0 +1,562 @@
services:
traefik:
image: ghcr.io/traefik/traefik:3.3
container_name: traefik
restart: always
environment:
- CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}
- CLOUDFLARE_DNS_API_TOKEN=${CLOUDFLARE_DNS_API_TOKEN}
- CLOUDFLARE_ZONE_API_TOKEN=${CLOUDFLARE_ZONE_API_TOKEN}
- LETS_ENCRYPT_EMAIL=${LETS_ENCRYPT_EMAIL}
command:
- --ping=true
- --providers.docker=true
- --providers.docker.exposedbydefault=false
- --entrypoints.web.address=:80
- --entrypoints.web-secure.address=:443
- --entrypoints.web.http.redirections.entryPoint.to=web-secure
- --entrypoints.web.http.redirections.entryPoint.scheme=https
- --entrypoints.web.http.redirections.entrypoint.permanent=true
- --experimental.plugins.rewrite-body.modulename=github.com/packruler/rewrite-body
- --experimental.plugins.rewrite-body.version=v1.2.0
- --experimental.plugins.rewriteHeaders.modulename=github.com/XciD/traefik-plugin-rewrite-headers
- --experimental.plugins.rewriteHeaders.version=v0.0.3
- --certificatesresolvers.myresolver.acme.dnschallenge=${DNS_CHALLENGE:-true}
- --certificatesresolvers.myresolver.acme.dnschallenge.provider=${DNS_CHALLENGE_PROVIDER:-cloudflare}
- --certificatesresolvers.myresolver.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53
- --certificatesresolvers.myresolver.acme.caserver=${LETS_ENCRYPT_CA_SERVER:-https://acme-v02.api.letsencrypt.org/directory}
- --certificatesresolvers.myresolver.acme.email=${LETS_ENCRYPT_EMAIL}
- --certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json
ports:
- "80:80"
- "443:443"
volumes:
- ${CONFIG_ROOT:-.}/letsencrypt:/letsencrypt
- "/var/run/docker.sock:/var/run/docker.sock:ro"
extra_hosts:
- host.docker.internal:172.17.0.1
healthcheck:
test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 30s
retries: 10
sonarr:
image: lscr.io/linuxserver/sonarr
container_name: sonarr
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/sonarr:/config
- ${DATA_ROOT}:/data
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:8989/sonarr/ping"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.sonarr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/sonarr`))
- traefik.http.routers.sonarr.tls=true
- traefik.http.routers.sonarr.tls.certresolver=myresolver
- traefik.http.services.sonarr.loadbalancer.server.port=8989
- homepage.group=Media
- homepage.name=Sonarr
- homepage.icon=sonarr.png
- homepage.href=/sonarr
- homepage.description=Series management
- homepage.weight=0
- homepage.widget.type=sonarr
- homepage.widget.url=http://sonarr:8989/sonarr
- homepage.widget.key=${SONARR_API_KEY}
radarr:
image: lscr.io/linuxserver/radarr
container_name: radarr
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/radarr:/config
- ${DATA_ROOT}:/data
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:7878/radarr/ping"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.radarr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/radarr`))
- traefik.http.routers.radarr.tls=true
- traefik.http.routers.radarr.tls.certresolver=myresolver
- traefik.http.services.radarr.loadbalancer.server.port=7878
- homepage.group=Media
- homepage.name=Radarr
- homepage.icon=radarr.png
- homepage.href=/radarr
- homepage.description=Movies management
- homepage.weight=1
- homepage.widget.type=radarr
- homepage.widget.url=http://radarr:7878/radarr
- homepage.widget.key=${RADARR_API_KEY}
lidarr:
image: lscr.io/linuxserver/lidarr
container_name: lidarr
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/lidarr:/config
- ${DATA_ROOT}:/data
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:8686/lidarr/ping"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.lidarr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/lidarr`))
- traefik.http.routers.lidarr.tls=true
- traefik.http.routers.lidarr.tls.certresolver=myresolver
- traefik.http.services.lidarr.loadbalancer.server.port=8686
- homepage.group=Media
- homepage.name=Lidarr
- homepage.icon=lidarr.png
- homepage.href=/lidarr
- homepage.description=Music management
- homepage.weight=2
- homepage.widget.type=lidarr
- homepage.widget.url=http://lidarr:8686/lidarr
- homepage.widget.key=${LIDARR_API_KEY}
profiles:
- lidarr
bazarr:
image: lscr.io/linuxserver/bazarr
container_name: bazarr
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/bazarr/config:/config
- ${DATA_ROOT}:/data
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:6767/bazarr/ping"]
interval: 5s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.bazarr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/bazarr`))
- traefik.http.routers.bazarr.tls=true
- traefik.http.routers.bazarr.tls.certresolver=myresolver
- traefik.http.services.bazarr.loadbalancer.server.port=6767
- homepage.group=Download
- homepage.name=Bazarr
- homepage.icon=bazarr.png
- homepage.href=/bazarr
- homepage.description=Subtitles management
- homepage.weight=4
- homepage.widget.type=bazarr
- homepage.widget.url=http://bazarr:6767/bazarr
- homepage.widget.key=${BAZARR_API_KEY}
jellyseerr:
image: ghcr.io/fallenbagel/jellyseerr:latest
container_name: jellyseerr
environment:
- LOG_LEVEL=debug
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/jellyseerr:/app/config
restart: always
healthcheck:
test:
[
"CMD",
"wget",
"http://127.0.0.1:5055/api/v1/status",
"-qO",
"/dev/null",
]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.jellyseerr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/jellyseerr`))
- traefik.http.routers.jellyseerr.tls=true
- traefik.http.routers.jellyseerr.tls.certresolver=myresolver
- traefik.http.services.jellyseerr.loadbalancer.server.port=5055
- traefik.http.routers.jellyseerr.middlewares=jellyseerr-stripprefix,jellyseerr-rewrite,jellyseerr-rewriteHeaders
- traefik.http.middlewares.jellyseerr-stripprefix.stripPrefix.prefixes=/jellyseerr
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[0].header=location
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[0].regex=^/(.+)$
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[0].replacement=/jellyseerr/$1
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[1].header=location
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[1].regex=^/$
- traefik.http.middlewares.jellyseerr-rewriteHeaders.plugin.rewriteHeaders.rewrites[1].replacement=/jellyseerr
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.monitoring.types[0]=text/html
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.monitoring.types[1]=application/javascript
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.monitoring.types[2]=*/*
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.monitoring.types[3]=application/json
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[1].regex=/_next
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[1].replacement=/jellyseerr/_next
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[2].regex=/_next/data/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[2].replacement=/jellyseerr/_next/data/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[3].regex=/api/v1
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[3].replacement=/jellyseerr/api/v1
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[4].regex=/login/plex/loading
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[4].replacement=/jellyseerr/login/plex/loading
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[5].regex=/images/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[5].replacement=/jellyseerr/images/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[6].regex=/favicon
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[6].replacement=/jellyseerr/favicon
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[7].regex=/logo_
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[7].replacement=/jellyseerr/logo_
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[8].regex=/site.webmanifest
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[8].replacement=/jellyseerr/site.webmanifest
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[9].regex=/sw.js
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[9].replacement=/jellyseerr/sw.js
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[10].regex=/offline.html
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[10].replacement=/jellyseerr/offline.html
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[11].regex=src="/os_logo_square.png"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[11].replacement=src="/jellyseerr/os_logo_square.png"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[12].regex=href([=:])"/([/a-zA-Z?=]*)"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[12].replacement=href$1"/jellyseerr/$2"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[13].regex=linkUrl:"/([/a-zA-Z?=]*)"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[13].replacement=linkUrl:"/jellyseerr/$1"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[14].regex="/([a-z]+)/".concat
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[14].replacement="/jellyseerr/$1/".concat
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[15].regex=url:"/([/a-zA-Z?=]*)"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[15].replacement=url:"/jellyseerr/$1"
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[16].regex=/imageproxy/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[16].replacement=/jellyseerr/imageproxy/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[17].regex=/avatarproxy/
- traefik.http.middlewares.jellyseerr-rewrite.plugin.rewrite-body.rewrites[17].replacement=/jellyseerr/avatarproxy/
- homepage.group=Media
- homepage.name=JellySeerr
- homepage.icon=jellyseerr.png
- homepage.href=/jellyseerr
- homepage.description=Content Recommendations and Request Management
- homepage.weight=3
- homepage.widget.type=jellyseerr
- homepage.widget.url=http://jellyseerr:5055
- homepage.widget.key=${JELLYSEERR_API_KEY}
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/prowlarr:/config
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:9696/prowlarr/ping"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.prowlarr.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/prowlarr`))
- traefik.http.routers.prowlarr.tls=true
- traefik.http.routers.prowlarr.tls.certresolver=myresolver
- traefik.http.services.prowlarr.loadbalancer.server.port=9696
- homepage.group=Download
- homepage.name=Prowlarr
- homepage.icon=prowlarr.png
- homepage.href=/prowlarr
- homepage.description=Indexers management
- homepage.weight=1
- homepage.widget.type=prowlarr
- homepage.widget.url=http://prowlarr:9696/prowlarr
- homepage.widget.key=${PROWLARR_API_KEY}
flaresolverr:
image: 21hsmw/flaresolverr:nodriver
container_name: flaresolverr
restart: always
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=${TIMEZONE}
labels:
- traefik.enable=true
- traefik.http.routers.flaresolverr.rule=PathPrefix(`/flaresolverr`)
- traefik.http.routers.flaresolverr.tls=true
- traefik.http.services.flaresolverr.loadbalancer.server.port=8191
profiles:
- flaresolverr
qbittorrent:
image: lscr.io/linuxserver/qbittorrent:libtorrentv1
container_name: qbittorrent
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
- WEBUI_PORT=8080
- DOCKER_MODS=ghcr.io/gabe565/linuxserver-mod-vuetorrent
volumes:
- ${CONFIG_ROOT:-.}/qbittorrent:/config
- ${DOWNLOAD_ROOT}:/data/torrents
restart: always
healthcheck:
# Container may fail if the PIA's token expired, so mark as unhealthy when there is no internet connection
# see: https://github.com/qdm12/gluetun/issues/641#issuecomment-933856220
test:
["CMD", "curl", "--fail", "http://127.0.0.1:8080", "https://google.com"]
interval: 30s
retries: 10
network_mode: "service:vpn"
depends_on:
vpn:
condition: service_healthy
labels:
- traefik.enable=true
- traefik.http.routers.qbittorrent.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/qbittorrent`))
- traefik.http.routers.qbittorrent.tls=true
- traefik.http.routers.qbittorrent.tls.certresolver=myresolver
- traefik.http.services.qbittorrent.loadbalancer.server.port=8080
- traefik.http.routers.qbittorrent.middlewares=qbittorrent-strip-slash,qbittorrent-stripprefix
# https://github.com/qbittorrent/qBittorrent/issues/5693#issuecomment-552146296
- traefik.http.middlewares.qbittorrent-stripprefix.stripPrefix.prefixes=/qbittorrent
# https://community.traefik.io/t/middleware-to-add-the-if-needed/1895/19
- traefik.http.middlewares.qbittorrent-strip-slash.redirectregex.regex=(^.*\/qbittorrent$$)
- traefik.http.middlewares.qbittorrent-strip-slash.redirectregex.replacement=$$1/
- traefik.http.middlewares.qbittorrent-strip-slash.redirectregex.permanent=false
#- com.centurylinklabs.watchtower.depends-on=/vpn
- homepage.group=Download
- homepage.name=qBittorrent
- homepage.icon=qbittorrent.png
- homepage.href=/qbittorrent
- homepage.description=Bittorrent client
- homepage.weight=2
- homepage.widget.type=qbittorrent
- homepage.widget.url=http://vpn:8080
- homepage.widget.username=${QBITTORRENT_USERNAME}
- homepage.widget.password=${QBITTORRENT_PASSWORD}
vpn:
image: ghcr.io/thrnz/docker-wireguard-pia:latest
container_name: vpn
volumes:
- ${CONFIG_ROOT:-.}/pia:/pia
- ${CONFIG_ROOT:-.}/pia-shared:/pia-shared
cap_add:
- NET_ADMIN
- SYS_MODULE
environment:
- LOC=${PIA_LOCATION}
- USER=${PIA_USER}
- PASS=${PIA_PASS}
- QBT_USER=${QBITTORRENT_USERNAME}
- QBT_PASS=${QBITTORRENT_PASSWORD}
- LOCAL_NETWORK=${PIA_LOCAL_NETWORK}
- PORT_FORWARDING=1
- PORT_PERSIST=1
- PORT_SCRIPT=/pia-shared/portupdate-qbittorrent.sh
- FIREWALL=0
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.default.disable_ipv6=1
- net.ipv6.conf.all.disable_ipv6=1
- net.ipv6.conf.lo.disable_ipv6=1
healthcheck:
test: ping -c 1 www.google.com || exit 1
interval: 30s
timeout: 10s
retries: 3
restart: always
unpackerr:
image: ghcr.io/unpackerr/unpackerr:latest
container_name: unpackerr
volumes:
- ${DOWNLOAD_ROOT}:/data/torrents
restart: always
user: ${USER_ID}:${GROUP_ID}
environment:
- TZ=${TIMEZONE}
- UN_SONARR_0_URL=http://sonarr:8989/sonarr
- UN_SONARR_0_API_KEY=${SONARR_API_KEY}
- UN_RADARR_0_URL=http://radarr:7878/radarr
- UN_RADARR_0_API_KEY=${RADARR_API_KEY}
security_opt:
- no-new-privileges:true
sabnzbd:
image: lscr.io/linuxserver/sabnzbd:latest
container_name: sabnzbd
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/sabnzbd:/config
- ${DATA_ROOT}:/data
restart: always
labels:
- traefik.enable=true
- traefik.http.routers.sabnzbd.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/sabnzbd`) || PathPrefix(`/sabnzbd`))
- traefik.http.routers.sabnzbd.tls=true
- traefik.http.routers.sabnzbd.tls.certresolver=myresolver
- traefik.http.services.sabnzbd.loadbalancer.server.port=8080
- homepage.group=Download
- homepage.name=Sabnzbd
- homepage.icon=sabnzbd.png
- homepage.href=/sabnzbd
- homepage.description=Usenet
- homepage.weight=3
- homepage.widget.type=sabnzbd
- homepage.widget.url=http://sabnzbd:8080/sabnzbd
- homepage.widget.key=${SABNZBD_API_KEY}
profiles:
- sabnzbd
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
- JELLYFIN_PublishedServerUrl=${HOSTNAME}/jellyfin
volumes:
- ${CONFIG_ROOT:-.}/jellyfin:/config
- ${DATA_ROOT}:/data
ports:
- "7359:7359/udp"
- "1900:1900/udp"
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:8096/jellyfin/health"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.jellyfin.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/jellyfin`))
- traefik.http.routers.jellyfin.tls=true
- traefik.http.routers.jellyfin.tls.certresolver=myresolver
- traefik.http.services.jellyfin.loadbalancer.server.port=8096
- homepage.group=Media
- homepage.name=Jellyfin
- homepage.icon=jellyfin.png
- homepage.href=/jellyfin
- homepage.description=Media server
- homepage.weight=4
- homepage.widget.type=jellyfin
- homepage.widget.url=http://jellyfin:8096/jellyfin
- homepage.widget.key=${JELLYFIN_API_KEY}
calibre-web:
image: lscr.io/linuxserver/calibre-web:latest
container_name: calibre-web
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
- DOCKER_MODS=linuxserver/mods:universal-calibre
- OAUTHLIB_RELAX_TOKEN_SCOPE=1
volumes:
- ${CONFIG_ROOT:-.}/calibre-web:/config
- ${DATA_ROOT}/books:/books
restart: unless-stopped
labels:
- traefik.enable=true
- traefik.http.middlewares.calibre-headers.headers.customRequestHeaders.X-Scheme=https
- traefik.http.middlewares.calibre-headers.headers.customRequestHeaders.X-Script-Name=/calibre
- traefik.http.middlewares.calibre-stripprefixregex.stripPrefixRegex.regex=/calibre
- traefik.http.routers.calibre.middlewares=calibre-headers,calibre-stripprefixregex
- traefik.http.routers.calibre.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/calibre`))
- traefik.http.routers.calibre.tls=true
- traefik.http.routers.calibre.tls.certresolver=myresolver
- traefik.http.services.calibre.loadbalancer.server.port=8083
- homepage.group=Media
- homepage.name=Calibre-Web
- homepage.icon=calibre-web.png
- homepage.href=/calibre
- homepage.description=Books management
- homepage.weight=5
- homepage.widget.type=calibreweb
- homepage.widget.url=http://calibre-web:8083
- homepage.widget.username=${CALIBRE_USERNAME}
- homepage.widget.password=${CALIBRE_PASSWORD}
profiles:
- calibre-web
decluttarr:
image: ghcr.io/manimatter/decluttarr:latest
container_name: decluttarr
restart: always
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
- RADARR_URL=http://radarr:7878/radarr
- RADARR_KEY=${RADARR_API_KEY}
- SONARR_URL=http://sonarr:8989/sonarr
- SONARR_KEY=${SONARR_API_KEY}
- LIDARR_URL=http://lidarr:8686/lidarr
- LIDARR_KEY=${LIDARR_API_KEY}
- QBITTORRENT_URL=http://qbittorrent:8080
- QBITTORRENT_USERNAME=${QBITTORRENT_USERNAME}
- QBITTORRENT_PASSWORD=${QBITTORRENT_PASSWORD}
- LOG_LEVEL=${DECLUTTARR_LOG_LEVEL:-INFO}
- TEST_RUN=${DECLUTTARR_TEST_RUN:-False}
- REMOVE_TIMER=${DECLUTTARR_REMOVE_TIMER:-10}
- REMOVE_FAILED=${DECLUTTARR_REMOVE_FAILED:-True}
- REMOVE_FAILED_IMPORTS=${DECLUTTARR_REMOVE_FAILED_IMPORTS:-True}
- REMOVE_METADATA_MISSING=${DECLUTTARR_REMOVE_METADATA_MISSING:-True}
- REMOVE_MISSING_FILES=${DECLUTTARR_REMOVE_MISSING_FILES:-True}
- REMOVE_ORPHANS=${DECLUTTARR_REMOVE_ORPHANS:-True}
- REMOVE_SLOW=${DECLUTTARR_REMOVE_SLOW:-True}
- REMOVE_STALLED=${DECLUTTARR_REMOVE_STALLED:-True}
- REMOVE_UNMONITORED=${DECLUTTARR_REMOVE_UNMONITORED:-True}
- RUN_PERIODIC_RESCANS=${DECLUTTARR_RUN_PERIODIC_RESCANS:-}
- PERMITTED_ATTEMPTS=${DECLUTTARR_PERMITTED_ATTEMPTS:-3}
- NO_STALLED_REMOVAL_QBIT_TAG=${DECLUTTARR_REMOVAL_QBIT_TAG:-"stalled"}
- MIN_DOWNLOAD_SPEED=${DECLUTTARR_MIN_DOWNLOAD_SPEED:-100}
- FAILED_IMPORT_MESSAGE_PATTERNS=${DECLUTTARR_FAILED_IMPORT_MESSAGE_PATTERNS:-}
- IGNORED_DOWNLOAD_CLIENTS=${DECLUTTARR_IGNORED_DOWNLOAD_CLIENTS:-}
profiles:
- decluttarr
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
environment:
- HOMEPAGE_VAR_TITLE=${HOMEPAGE_VAR_TITLE}
- HOMEPAGE_VAR_SEARCH_PROVIDER=${HOMEPAGE_VAR_SEARCH_PROVIDER}
- HOMEPAGE_VAR_HEADER_STYLE=${HOMEPAGE_VAR_HEADER_STYLE}
- HOMEPAGE_VAR_WEATHER_CITY=${HOMEPAGE_VAR_WEATHER_CITY}
- HOMEPAGE_VAR_WEATHER_LAT=${HOMEPAGE_VAR_WEATHER_LAT}
- HOMEPAGE_VAR_WEATHER_LONG=${HOMEPAGE_VAR_WEATHER_LONG}
- HOMEPAGE_VAR_WEATHER_TIME=${TIMEZONE}
- HOMEPAGE_VAR_WEATHER_UNIT=${HOMEPAGE_VAR_WEATHER_UNIT}
- HOMEPAGE_ALLOWED_HOSTS=${HOSTNAME}
volumes:
- ${CONFIG_ROOT:-.}/homepage:/app/config
- /var/run/docker.sock:/var/run/docker.sock:ro
- ${DATA_ROOT}:/data
restart: always
command:
[sh, -c, "cp -n /app/config/tpl/*.yaml /app/config && node server.js"]
labels:
- traefik.enable=true
- traefik.http.routers.homepage.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/`))
- traefik.http.routers.homepage.tls=true
- traefik.http.routers.homepage.tls.certresolver=myresolver
- traefik.http.services.homepage.loadbalancer.server.port=3000
watchtower:
image: ghcr.io/containrrr/watchtower:latest
container_name: watchtower
restart: always
environment:
- WATCHTOWER_CLEANUP=true
volumes:
- /var/run/docker.sock:/var/run/docker.sock
autoheal:
image: willfarrell/autoheal:latest
container_name: autoheal
restart: always
environment:
- AUTOHEAL_CONTAINER_LABEL=all
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
default:
name: docker-compose-nas

6
homeassistant/.gitignore vendored Normal file
View File

@ -0,0 +1,6 @@
*
!README.md
!docker-compose.yml
!backup.env.example
!mosquitto.env.example
!mosquitto/config/mosquitto.conf

104
homeassistant/README.md Normal file
View File

@ -0,0 +1,104 @@
# Home Assistant
Open source home automation that puts local control and privacy first. Powered by a worldwide community of tinkerers and DIY enthusiasts
## Installation
Enable Home Assistant by setting `COMPOSE_PROFILES=homeassistant`.
Set the `HOMEASSISTANT_HOSTNAME`, since it does not support
[running in a subfolder](https://github.com/home-assistant/architecture/issues/156).
Add the necessary DNS records in your domain.
You will need to allow Traefik to access Home Assistant by adding the following in `homeassistant/configuration.yaml`:
```yaml
http:
use_x_forwarded_for: true
trusted_proxies:
- 172.0.0.0/8 # You can put a more precise range instead
```
Set the `HOMEASSISTANT_ACCESS_TOKEN` for homepage support.
## MQTT
If you need to use MQTT, you can enable it by setting `COMPOSE_PROFILES=homeassistant,mqtt`.
Start the container, create a user in mosquitto with the following command and the credentials defined previously:
`docker compose exec mosquitto mosquitto_passwd -b /mosquitto/config/pwfile <username> <password>`
Restart the Mosquitto container to apply the changes.
In HomeAssistant, add the MQTT integration with hostname `localhost`, port 1883 and the username and password defined above.
## Backup
### Enable Backups in HomeAssistant
We will create an automation that will create backups nightly and clear old ones.
Add a `command_line` inclusion in your `configuration.yaml`: `command_line: !include command_lines.yaml`
The `command_lines.yaml` defines a switch that removes backups older than 7 days:
```yaml
- switch:
name: Purge old backups
unique_id: switch.purge_backups
icon: mdi:trash-can
command_on: 'cd /config/backups/ && find . -maxdepth 1 -type f -mtime +7 -print | xargs rm -f'
```
Then, create an automation that will trigger backups nightly and call the purge old backups switch:
```yaml
alias: Backup Home Assistant every night at 3 AM
description: Backup Home Assistant every night at 3 AM
trigger:
- platform: time
at: "03:00:00"
action:
- service: backup.create
data: {}
- service: switch.turn_on
data: {}
target:
entity_id: switch.purge_old_backups
- service: switch.turn_off
data: {}
target:
entity_id: switch.purge_old_backups
mode: single
```
### Save Backups Remotely
Home Assistant can be backed up in the cloud storage product of your choice with [Rclone](https://rclone.org/).
Before a backup can be made, `rclone config` must be run to generate the configuration file:
```shell
docker compose run --rm -it homeassistant-backup rclone config
```
It will generate a `rclone.conf` configuration file in ./homeassistant/rclone/rclone.conf.
Copy the backup environment file to `backup.env` and fill it as needed:
`cp backup.env.example backup.env`
| Variable | Description | Default |
|----------------------|---------------------------------------------------------------------|---------------------------|
| `RCLONE_REMOTE_NAME` | Name of the remote you chose during rclone config | |
| `RCLONE_REMOTE_DIR` | Name of the rclone remote dir, eg: S3 bucket name, folder name, etc | |
| `CRON` | How often to run the backup | `@daily` backup every day |
| `TIMEZONE` | Timezone, used for cron times | `America/New_York` |
| `ZIP_PASSWORD` | Password to protect the backup archive with | `123456` |
| `BACKUP_KEEP_DAYS` | How long to keep the backup in the destination | `31` days |
You can test your backup manually with:
```shell
docker compose run --rm -it homeassistant-backup backup
```

View File

@ -0,0 +1,6 @@
RCLONE_REMOTE_NAME=
RCLONE_REMOTE_DIR=
CRON=@daily
TIMEZONE=America/New_York
ZIP_PASSWORD=123456
BACKUP_KEEP_DAYS=1

View File

@ -0,0 +1,85 @@
services:
homeassistant:
image: ghcr.io/home-assistant/home-assistant:stable
container_name: homeassistant
network_mode: host
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
- TZ=${TIMEZONE}
volumes:
- ${CONFIG_ROOT:-.}/homeassistant:/config
- /etc/localtime:/etc/localtime:ro
- /run/dbus:/run/dbus:ro
restart: always
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:8123"]
interval: 30s
retries: 10
privileged: true
labels:
- traefik.enable=true
- traefik.http.routers.homeassistant.rule=(Host(`${HOMEASSISTANT_HOSTNAME}`))
- traefik.http.routers.homeassistant.tls=true
- traefik.http.routers.homeassistant.tls.certresolver=myresolver
- traefik.http.services.homeassistant.loadbalancer.server.port=8123
- homepage.group=Apps
- homepage.name=Home Assistant
- homepage.icon=home-assistant.png
- homepage.href=https://${HOMEASSISTANT_HOSTNAME}
- homepage.description=Open source home automation that puts local control and privacy first
- homepage.weight=3
- homepage.widget.type=homeassistant
- homepage.widget.url=https://${HOMEASSISTANT_HOSTNAME}
- homepage.widget.key=${HOMEASSISTANT_ACCESS_TOKEN}
profiles:
- homeassistant
mosquitto:
container_name: mosquitto
image: public.ecr.aws/docker/library/eclipse-mosquitto:latest
restart: always
user: ${USER_ID}:${GROUP_ID}
environment:
- PUID=${USER_ID}
- PGID=${GROUP_ID}
volumes:
- ${CONFIG_ROOT:-.}/homeassistant/mosquitto/config:/mosquitto/config
- ${CONFIG_ROOT:-.}/homeassistant/mosquitto/data:/mosquitto/data
- ${CONFIG_ROOT:-.}/homeassistant/mosquitto/log:/mosquitto/log
ports:
- "1883:1883"
healthcheck:
test:
[
"CMD",
"mosquitto_sub",
"-p",
"1880",
"-t",
"$$SYS/#",
"-C",
"1",
"-i",
"healthcheck",
"-W",
"3",
]
interval: 1m
timeout: 10s
retries: 3
profiles:
- mqtt
homeassistant-backup:
image: ghcr.io/adrienpoupa/rclone-backup:latest
container_name: homeassistant-backup
restart: always
env_file:
- ${CONFIG_ROOT:-.}/homeassistant/backup.env
environment:
- BACKUP_FOLDER_NAME=backups
- BACKUP_FOLDER_PATH=/backups
volumes:
- ${CONFIG_ROOT:-.}/homeassistant/backups:/backups
- ${CONFIG_ROOT:-.}/homeassistant/backup:/config
profiles:
- homeassistant

View File

@ -0,0 +1,12 @@
# Mosquitto broker configuration.
#
# Apply security settings per listener (this must be the first security-related
# option in the file). Without it, `allow_anonymous` is a single global option,
# so the final `allow_anonymous true` (intended only for the loopback
# healthcheck listener) would override `allow_anonymous false` and open the
# external 1883 listener to anonymous clients despite the password file.
per_listener_settings true

persistence true
persistence_location /mosquitto/data
log_dest file /mosquitto/log/mosquitto.log

# External listener: requires username/password authentication.
listener 1883
password_file /mosquitto/config/pwfile
allow_anonymous false

# Loopback-only listener used by the container healthcheck: anonymous allowed.
listener 1880 127.0.0.1
allow_anonymous true

0
homepage/.gitkeep Normal file
View File

View File

@ -0,0 +1,8 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/bookmarks
- Credits:
- Docker-Compose-NAS:
- abbr: NAS
href: https://github.com/AdrienPoupa/docker-compose-nas

6
homepage/tpl/docker.yaml Normal file
View File

@ -0,0 +1,6 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/docker/
my-docker:
socket: /var/run/docker.sock

View File

@ -0,0 +1,3 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/services

View File

@ -0,0 +1,20 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/settings
title: {{HOMEPAGE_VAR_TITLE}}
headerStyle: {{HOMEPAGE_VAR_HEADER_STYLE}}
layout:
Media:
style: row
columns: 3
Download:
style: row
columns: 2
quicklaunch:
searchDescriptions: true
hideInternetSearch: true
hideVisitURL: true

27
homepage/tpl/widgets.yaml Normal file
View File

@ -0,0 +1,27 @@
---
# For configuration options and examples, please see:
# https://gethomepage.dev/en/configs/widgets
- resources:
cpu: true
memory: true
disk:
- /
- /data
- search:
provider: {{HOMEPAGE_VAR_SEARCH_PROVIDER}}
target: _blank
- openmeteo:
label: {{HOMEPAGE_VAR_WEATHER_CITY}}
latitude: {{HOMEPAGE_VAR_WEATHER_LAT}}
longitude: {{HOMEPAGE_VAR_WEATHER_LONG}}
timezone: {{HOMEPAGE_VAR_WEATHER_TIME}}
units: {{HOMEPAGE_VAR_WEATHER_UNIT}}
cache: 5 # Time in minutes to cache API responses, to stay within limits
- datetime:
text_size: md
format:
timeStyle: short

4
immich/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
*
!README.md
!docker-compose.yml
!healthcheck

20
immich/README.md Normal file
View File

@ -0,0 +1,20 @@
# Immich
Self-hosted photo and video management solution
## Installation
Enable Immich by setting `COMPOSE_PROFILES=immich`.
Set the `IMMICH_HOSTNAME`, since it does not support
[running in a subfolder](https://github.com/immich-app/immich/discussions/1679#discussioncomment-7276351).
Add the necessary DNS records in your domain.
## Environment Variables
| Variable | Description | Default |
|--------------------------|------------------------------------------------------|--------------------|
| `IMMICH_HOSTNAME` | URL Immich will be accessible from | |
| `IMMICH_UPLOAD_LOCATION` | Path where the assets will be stored | `/mnt/data/photos` |
| `IMMICH_API_KEY`         | Immich API key to show information in the homepage   |                    |
| `IMMICH_DB_PASSWORD` | Postgres database password, change for more security | `postgres` |

89
immich/docker-compose.yml Normal file
View File

@ -0,0 +1,89 @@
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:v1.129.0
environment:
DB_HOSTNAME: immich_postgres
DB_PASSWORD: ${IMMICH_DB_PASSWORD}
DB_USERNAME: postgres
DB_DATABASE_NAME: immich
REDIS_HOSTNAME: immich_redis
volumes:
- ${IMMICH_UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
- ${CONFIG_ROOT:-.}/immich/healthcheck:/healthcheck
depends_on:
- immich-redis
- immich-database
restart: always
labels:
- traefik.enable=true
- traefik.http.routers.immich.rule=(Host(`${IMMICH_HOSTNAME}`))
- traefik.http.routers.immich.tls=true
- traefik.http.routers.immich.tls.certresolver=myresolver
- traefik.http.services.immich.loadbalancer.server.port=2283
- homepage.group=Apps
- homepage.name=Immich
- homepage.icon=immich.png
- homepage.href=https://${IMMICH_HOSTNAME}
- homepage.description=Self-hosted photo and video management solution
- homepage.weight=4
- homepage.widget.type=immich
- homepage.widget.url=http://immich-server:2283
- homepage.widget.key=${IMMICH_API_KEY}
- homepage.widget.version=2
profiles:
- immich
immich-machine-learning:
container_name: immich_machine_learning
image: ghcr.io/immich-app/immich-machine-learning:v1.129.0
volumes:
- immich-model-cache:/cache
restart: always
profiles:
- immich
immich-redis:
container_name: immich_redis
image: public.ecr.aws/docker/library/redis:6.2
restart: always
healthcheck:
test: redis-cli ping || exit 1
profiles:
- immich
immich-database:
container_name: immich_postgres
image: ghcr.io/tensorchord/pgvecto-rs:pg14-v0.2.0
environment:
POSTGRES_PASSWORD: ${IMMICH_DB_PASSWORD}
POSTGRES_USER: postgres
POSTGRES_DB: immich
POSTGRES_INITDB_ARGS: "--data-checksums"
volumes:
- ${CONFIG_ROOT:-.}/immich/postgresql:/var/lib/postgresql/data
restart: always
healthcheck:
test: >-
pg_isready --dbname=immich --username=postgres || exit 1;
Chksum="$$(psql --dbname=immich --username=postgres --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
profiles:
- immich
volumes:
immich-model-cache:

2
jellyfin/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*
!network.xml

28
jellyfin/network.xml Normal file
View File

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="utf-8"?>
<NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<BaseUrl>/jellyfin</BaseUrl>
<EnableHttps>false</EnableHttps>
<RequireHttps>false</RequireHttps>
<CertificatePath />
<CertificatePassword />
<InternalHttpPort>8096</InternalHttpPort>
<InternalHttpsPort>8920</InternalHttpsPort>
<PublicHttpPort>8096</PublicHttpPort>
<PublicHttpsPort>8920</PublicHttpsPort>
<AutoDiscovery>true</AutoDiscovery>
<EnableUPnP>false</EnableUPnP>
<EnableIPv4>true</EnableIPv4>
<EnableIPv6>false</EnableIPv6>
<EnableRemoteAccess>true</EnableRemoteAccess>
<LocalNetworkSubnets />
<LocalNetworkAddresses />
<KnownProxies />
<IgnoreVirtualInterfaces>true</IgnoreVirtualInterfaces>
<VirtualInterfaceNames>
<string>veth</string>
</VirtualInterfaceNames>
<EnablePublishedServerUriByRequest>false</EnablePublishedServerUriByRequest>
<PublishedServerUriBySubnet />
<RemoteIPFilter />
<IsRemoteIPFilterBlacklist>false</IsRemoteIPFilterBlacklist>
</NetworkConfiguration>

8
joplin/.env.example Normal file
View File

@ -0,0 +1,8 @@
MAILER_ENABLED=false
MAILER_HOST=
MAILER_PORT=465
MAILER_SECURITY=MailerSecurity.Tls
MAILER_AUTH_USER=
MAILER_AUTH_PASSWORD=
MAILER_NOREPLY_NAME=
MAILER_NOREPLY_EMAIL=

4
joplin/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
/database
/storage
.env
backup.env

49
joplin/README.md Normal file
View File

@ -0,0 +1,49 @@
# Joplin
[Joplin](https://joplinapp.org/) is an open source note-taking app. Capture your thoughts and securely access them from any device.
This service lets you host your own Joplin server, which your clients can connect to.
## Installation
Enable Joplin by setting `COMPOSE_PROFILES=joplin`. It will be accessible at `/joplin`.
Copy the example environment file and edit as needed before running Joplin: `cp joplin/.env.example joplin/.env`.
## Backup
Joplin's database and media files can be backed up in the cloud storage product of your choice with [Rclone](https://rclone.org/).
Before a backup can be made, `rclone config` must be run to generate the configuration file:
```shell
docker compose run --rm -it joplin-backup rclone config
```
It will generate a `rclone.conf` configuration file in ./joplin/rclone/rclone.conf.
Copy the backup environment file to `backup.env` and fill it as needed:
`cp backup.env.example backup.env`
| Variable | Description | Default |
|------------------------|---------------------------------------------------------------------|---------------------------|
| `MAILER_ENABLED` | Enable Joplin mailer | `false` |
| `MAILER_HOST` | Mailer hostname | |
| `MAILER_PORT` | Mailer port | `465` |
| `MAILER_SECURITY` | Mailer security protocol | `MailerSecurity.Tls` |
| `MAILER_AUTH_USER` | Mailer user | |
| `MAILER_AUTH_PASSWORD` | Mailer password | |
| `MAILER_NOREPLY_NAME` | No reply email name | |
| `MAILER_NOREPLY_EMAIL` | No reply email address | |
| `RCLONE_REMOTE_NAME` | Name of the remote you chose during rclone config | |
| `RCLONE_REMOTE_DIR` | Name of the rclone remote dir, eg: S3 bucket name, folder name, etc | |
| `CRON` | How often to run the backup | `@daily` backup every day |
| `TIMEZONE` | Timezone, used for cron times | `America/New_York` |
| `ZIP_PASSWORD` | Password to protect the backup archive with | `123456` |
| `BACKUP_KEEP_DAYS` | How long to keep the backup in the destination | `31` days |
You can test your backup manually with:
```shell
docker compose run --rm -it joplin-backup backup
```

View File

@ -0,0 +1,6 @@
RCLONE_REMOTE_NAME=
RCLONE_REMOTE_DIR=
CRON=@daily
TIMEZONE=America/New_York
ZIP_PASSWORD=123456
BACKUP_KEEP_DAYS=31

0
joplin/database/.gitkeep Normal file
View File

59
joplin/docker-compose.yml Normal file
View File

@ -0,0 +1,59 @@
services:
joplin:
image: joplin/server:latest
user: root # Not pretty, but non-root breaks volumes: https://github.com/laurent22/joplin/issues/9489
container_name: joplin
restart: always
env_file:
- ${CONFIG_ROOT:-.}/joplin/.env
environment:
- APP_PORT=22300
- APP_BASE_URL=https://${HOSTNAME}/joplin
- HOSTNAME=${HOSTNAME}
- DB_CLIENT=sqlite3
- SQLITE_DATABASE=/database/joplin.db
- STORAGE_DRIVER=Type=Filesystem; Path=/storage
- EVENTS_AUTO_DELETE_ENABLED=true
- EVENTS_AUTO_DELETE_AFTER_DAYS=7
volumes:
- ${CONFIG_ROOT:-.}/joplin/database:/database
- ${CONFIG_ROOT:-.}/joplin/storage:/storage
- ${CONFIG_ROOT:-.}/joplin/healthcheck:/healthcheck
healthcheck:
test: ["CMD", "node", "/healthcheck/healthcheck.js"]
interval: 30s
retries: 10
labels:
- traefik.enable=true
- traefik.http.routers.joplin.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/joplin`))
- traefik.http.routers.joplin.tls=true
- traefik.http.routers.joplin.tls.certresolver=myresolver
- traefik.http.routers.joplin.middlewares=joplin-stripprefix
- traefik.http.middlewares.joplin-stripprefix.stripPrefix.prefixes=/joplin
- traefik.http.services.joplin.loadbalancer.server.port=22300
- homepage.group=Apps
- homepage.name=Joplin
- homepage.icon=joplin.png
- homepage.href=/joplin
- homepage.description=Note-taking application
- homepage.weight=2
profiles:
- joplin
joplin-backup:
image: ghcr.io/adrienpoupa/rclone-backup:latest
container_name: joplin-backup
restart: always
env_file:
- ${CONFIG_ROOT:-.}/joplin/backup.env
environment:
- BACKUP_FOLDER_NAME=storage
- BACKUP_FOLDER_PATH=/storage
- DB_TYPE=sqlite
- SQLITE_DATABASE=/database/joplin.db
volumes:
- ${CONFIG_ROOT:-.}/joplin/database:/database
- ${CONFIG_ROOT:-.}/joplin/storage:/storage
- ${CONFIG_ROOT:-.}/joplin/backup:/config
profiles:
- joplin

View File

@ -0,0 +1,28 @@
// Inspired by: https://anthonymineo.com/docker-healthcheck-for-your-node-js-app/
// Container healthcheck: probe Joplin's /api/ping endpoint on the local
// server and exit 0 only when it answers with HTTP 200.
const http = require('http');

const requestOptions = {
  host: '127.0.0.1',
  port: 22300,
  timeout: 2000,
  path: '/api/ping',
  headers: {
    // Forward the configured hostname — the server is configured with
    // APP_BASE_URL derived from HOSTNAME, so the Host header should match.
    'Host': process.env.HOSTNAME,
  },
};

const probe = http.request(requestOptions, (res) => {
  console.log(`HEALTHCHECK STATUS: ${res.statusCode}`);
  // Anything other than 200 is reported as unhealthy.
  process.exit(res.statusCode === 200 ? 0 : 1);
});

probe.on('error', (err) => {
  console.error('ERROR:' + err);
  process.exit(1);
});

probe.end();
View File

@ -0,0 +1,21 @@
#!/bin/bash

# Update qBittorrent's listening port (e.g. to match a VPN's forwarded port).
# Usage: <script> <port>
# Expects QBT_USER and QBT_PASS in the environment for the WebUI login.

port="$1"
QBT_PORT=8080

echo "Setting qBittorrent port settings ($port)..."

# Use a private, unpredictable cookie jar instead of a fixed /tmp path,
# and clean it up on every exit path.
cookie_jar=$(mktemp) || exit 1
trap 'rm -f -- "$cookie_jar"' EXIT

# Very basic retry logic so we don't fail if qBittorrent isn't running yet.
# --fail makes curl return non-zero on HTTP 4xx/5xx (e.g. while the WebUI is
# still starting or temporarily refusing logins), so we keep retrying.
while ! curl --silent --fail --retry 10 --retry-delay 15 --max-time 10 \
    --data "username=${QBT_USER}&password=${QBT_PASS}" \
    --cookie-jar "$cookie_jar" \
    "http://localhost:${QBT_PORT}/api/v2/auth/login"
do
  sleep 10
done

# Push the new listen_port using the session cookie from the login above.
curl --silent --fail --retry 10 --retry-delay 15 --max-time 10 \
    --data 'json={"listen_port": "'"$port"'"}' \
    --cookie "$cookie_jar" \
    "http://localhost:${QBT_PORT}/api/v2/app/setPreferences"

echo "qBittorrent port updated successfully ($port)..."

191
tandoor/.env.example Normal file
View File

@ -0,0 +1,191 @@
# only set this to true when testing/debugging
# when unset: 1 (true) - dont unset this, just for development
DEBUG=0
SQL_DEBUG=0
DEBUG_TOOLBAR=0
# Gunicorn log level for debugging (default value is "info" when unset)
# (see https://docs.gunicorn.org/en/stable/settings.html#loglevel for available settings)
# GUNICORN_LOG_LEVEL="debug"
# HTTP port to bind to
# TANDOOR_PORT=8080
# hosts the application can run under e.g. recipes.mydomain.com,cooking.mydomain.com,...
ALLOWED_HOSTS=*
# Cross Site Request Forgery protection
# (https://docs.djangoproject.com/en/4.2/ref/settings/#std-setting-CSRF_TRUSTED_ORIGINS)
# CSRF_TRUSTED_ORIGINS = []
# Cross Origin Resource Sharing
# (https://github.com/adamchainz/django-cors-header)
# CORS_ALLOW_ALL_ORIGINS = True
# random secret key, use for example `base64 /dev/urandom | head -c50` to generate one
# ---------------------------- AT LEAST ONE REQUIRED -------------------------
SECRET_KEY=
SECRET_KEY_FILE=
# ---------------------------------------------------------------
# your default timezone See https://timezonedb.com/time-zones for a list of timezones
TZ=America/New_York
# add only a database password if you want to run with the default postgres, otherwise change settings accordingly
DB_ENGINE=django.db.backends.sqlite3
# DB_OPTIONS= {} # e.g. {"sslmode":"require"} to enable ssl
#POSTGRES_HOST=db_recipes
#POSTGRES_PORT=5432
#POSTGRES_USER=djangouser
# ---------------------------- AT LEAST ONE REQUIRED -------------------------
#POSTGRES_PASSWORD=
#POSTGRES_PASSWORD_FILE=
# ---------------------------------------------------------------
POSTGRES_DB=/opt/recipes/database/recipes.db
# database connection string, when used overrides other database settings.
# format might vary depending on backend
# DATABASE_URL = engine://username:password@host:port/dbname
# the default value for the user preference 'fractions' (enable/disable fraction support)
# default: disabled=0
FRACTION_PREF_DEFAULT=0
# the default value for the user preference 'comments' (enable/disable commenting system)
# default comments enabled=1
COMMENT_PREF_DEFAULT=1
# Users can set an amount of time after which the shopping list is refreshed when they are in viewing mode
# This is the minimum interval users can set. Setting this too low will allow users to refresh very frequently which
# might cause high load on the server. (Technically they can obviously refresh as often as they want with their own scripts)
SHOPPING_MIN_AUTOSYNC_INTERVAL=5
# Default for user setting sticky navbar
# STICKY_NAV_PREF_DEFAULT=1
# If base URL is something other than just / (you are serving a subfolder in your proxy for instance http://recipe_app/recipes/)
# Be sure to not have a trailing slash: e.g. '/recipes' instead of '/recipes/'
SCRIPT_NAME=/recipes
# If staticfiles are stored at a different location uncomment and change accordingly, MUST END IN /
# this is not required if you are just using a subfolder
# This can either be a relative path from the applications base path or the url of an external host
STATIC_URL=/recipes/static/
# If mediafiles are stored at a different location uncomment and change accordingly, MUST END IN /
# this is not required if you are just using a subfolder
# This can either be a relative path from the applications base path or the url of an external host
MEDIA_URL=/recipes/media/
# Serve mediafiles directly using gunicorn. Basically everyone recommends not doing this. Please use any of the examples
# provided that include an additional nxginx container to handle media file serving.
# If you know what you are doing turn this back on (1) to serve media files using djangos serve() method.
# when unset: 1 (true) - this is temporary until an appropriate amount of time has passed for everyone to migrate
GUNICORN_MEDIA=0
# GUNICORN SERVER RELATED SETTINGS (see https://docs.gunicorn.org/en/stable/design.html#how-many-workers for recommended settings)
# GUNICORN_WORKERS=1
# GUNICORN_THREADS=1
# S3 Media settings: store mediafiles in s3 or any compatible storage backend (e.g. minio)
# as long as S3_ACCESS_KEY is not set S3 features are disabled
# S3_ACCESS_KEY=
# S3_SECRET_ACCESS_KEY=
# S3_BUCKET_NAME=
# S3_REGION_NAME= # default none, set your region might be required
# S3_QUERYSTRING_AUTH=1 # default true, set to 0 to serve media from a public bucket without signed urls
# S3_QUERYSTRING_EXPIRE=3600 # number of seconds querystring are valid for
# S3_ENDPOINT_URL= # when using a custom endpoint like minio
# S3_CUSTOM_DOMAIN= # when using a CDN/proxy to S3 (see https://github.com/TandoorRecipes/recipes/issues/1943)
# Email Settings, see https://docs.djangoproject.com/en/3.2/ref/settings/#email-host
# Required for email confirmation and password reset (automatically activates if host is set)
# EMAIL_HOST=
# EMAIL_PORT=
# EMAIL_HOST_USER=
# EMAIL_HOST_PASSWORD=
# EMAIL_USE_TLS=0
# EMAIL_USE_SSL=0
# email sender address (default 'webmaster@localhost')
# DEFAULT_FROM_EMAIL=
# prefix used for account related emails (default "[Tandoor Recipes] ")
# ACCOUNT_EMAIL_SUBJECT_PREFIX=
# allow authentication via the REMOTE-USER header (can be used for e.g. authelia).
# ATTENTION: Leave off if you don't know what you are doing! Enabling this without proper configuration will enable anybody
# to login with any username!
# See docs for additional information: https://docs.tandoor.dev/features/authentication/#reverse-proxy-authentication
# when unset: 0 (false)
REMOTE_USER_AUTH=0
# Default settings for spaces, apply per space and can be changed in the admin view
# SPACE_DEFAULT_MAX_RECIPES=0 # 0=unlimited recipes
# SPACE_DEFAULT_MAX_USERS=0 # 0=unlimited users per space
# SPACE_DEFAULT_MAX_FILES=0 # Maximum file storage for space in MB. 0 for unlimited, -1 to disable file upload.
# SPACE_DEFAULT_ALLOW_SHARING=1 # Allow users to share recipes with public links
# allow people to create local accounts on your application instance (without an invite link)
# social accounts will always be able to sign up
# when unset: 0 (false)
# ENABLE_SIGNUP=0
# If signup is enabled you might want to add a captcha to it to prevent spam
# HCAPTCHA_SITEKEY=
# HCAPTCHA_SECRET=
# if signup is enabled you might want to provide urls to data protection policies or terms and conditions
# TERMS_URL=
# PRIVACY_URL=
# IMPRINT_URL=
# enable serving of prometheus metrics under the /metrics path
# ATTENTION: view is not secured (as per the prometheus default way) so make sure to secure it
# through your web server (or leave it open if you don't care if the stats are exposed)
# ENABLE_METRICS=0
# allows you to setup OAuth providers
# see docs for more information https://docs.tandoor.dev/features/authentication/
# SOCIAL_PROVIDERS = allauth.socialaccount.providers.github, allauth.socialaccount.providers.nextcloud,
# Should a newly created user from a social provider get assigned to the default space and given permission by default ?
# ATTENTION: This feature might be deprecated in favor of a space join and public viewing system in the future
# default 0 (false), when 1 (true) users will be assigned space and group
# SOCIAL_DEFAULT_ACCESS = 1
# if SOCIAL_DEFAULT_ACCESS is used, which group should be added
# SOCIAL_DEFAULT_GROUP=guest
# Django session cookie settings. Can be changed to allow a single django application to authenticate several applications
# when running under the same database
# SESSION_COOKIE_DOMAIN=.example.com
# SESSION_COOKIE_NAME=sessionid # use this only to not interfere with non unified django applications under the same top level domain
# by default SORT_TREE_BY_NAME is disabled this will store all Keywords and Food in the order they are created
# enabling this setting makes saving new keywords and foods very slow, which doesn't matter in most usecases.
# however, when doing large imports of recipes that will create new objects, can increase total run time by 10-15x
# Keywords and Food can be manually sorted by name in Admin
# This value can also be temporarily changed in Admin, it will revert the next time the application is started
# This will be fixed/changed in the future by changing the implementation or finding a better workaround for sorting
# SORT_TREE_BY_NAME=0
# LDAP authentication
# default 0 (false), when 1 (true) list of allowed users will be fetched from LDAP server
#LDAP_AUTH=
#AUTH_LDAP_SERVER_URI=
#AUTH_LDAP_BIND_DN=
#AUTH_LDAP_BIND_PASSWORD=
#AUTH_LDAP_USER_SEARCH_BASE_DN=
#AUTH_LDAP_TLS_CACERTFILE=
#AUTH_LDAP_START_TLS=
# Enables exporting PDF (see export docs)
# Disabled by default, uncomment to enable
# ENABLE_PDF_EXPORT=1
# Recipe exports are cached for a certain time by default, adjust time if needed
# EXPORT_FILE_CACHE_DURATION=600
# if you want to do many requests to the FDC API you need to get a (free) API key. Demo key is limited to 30 requests / hour or 50 requests / day
#FDC_API_KEY=DEMO_KEY
# API throttle limits
# you may use X per second, minute, hour or day
# DRF_THROTTLE_RECIPE_URL_IMPORT=60/hour

8
tandoor/.gitignore vendored Normal file
View File

@ -0,0 +1,8 @@
.env
backup.env
/backup
!/backup/.gitkeep
/database
!/database/.gitkeep
/mediafiles
!/mediafiles/.gitkeep

39
tandoor/README.md Normal file
View File

@ -0,0 +1,39 @@
# Tandoor
[Tandoor](https://tandoor.dev/) is a recipe manager that allows you to manage your ever-growing collection of digital recipes.
## Installation
Enable Tandoor by setting `COMPOSE_PROFILES=tandoor`. It will be accessible at `/recipes`.
Copy the example environment file and edit as needed before running Tandoor: `cp tandoor/.env.example tandoor/.env`.
## Backup
Tandoor's database and media files can be backed up in the cloud storage product of your choice with [Rclone](https://rclone.org/).
Before a backup can be made, `rclone config` must be run to generate the configuration file:
```shell
docker compose run --rm -it tandoor-backup rclone config
```
It will generate a `rclone.conf` configuration file in ./tandoor/rclone/rclone.conf.
Copy the backup environment file to `backup.env` and fill it as needed:
`cp backup.env.example backup.env`
| Variable | Description | Default |
|----------------------|---------------------------------------------------------------------|---------------------------|
| `RCLONE_REMOTE_NAME` | Name of the remote you chose during rclone config | |
| `RCLONE_REMOTE_DIR` | Name of the rclone remote dir, eg: S3 bucket name, folder name, etc | |
| `CRON` | How often to run the backup | `@daily` backup every day |
| `TIMEZONE` | Timezone, used for cron times | `America/New_York` |
| `ZIP_PASSWORD` | Password to protect the backup archive with | `123456` |
| `BACKUP_KEEP_DAYS` | How long to keep the backup in the destination | `31` days |
You can test your backup manually with:
```shell
docker compose run --rm -it tandoor-backup backup
```

View File

@ -0,0 +1,6 @@
RCLONE_REMOTE_NAME=
RCLONE_REMOTE_DIR=
CRON=@daily
TIMEZONE=America/New_York
ZIP_PASSWORD=123456
BACKUP_KEEP_DAYS=31

View File

@ -0,0 +1,69 @@
services:
tandoor:
image: ghcr.io/tandoorrecipes/recipes:latest
container_name: tandoor
restart: always
env_file:
- ${CONFIG_ROOT:-.}/tandoor/.env
volumes:
- ${CONFIG_ROOT:-.}/tandoor/database:/opt/recipes/database
- ${CONFIG_ROOT:-.}/tandoor/mediafiles:/opt/recipes/mediafiles
- tandoor-staticfiles:/opt/recipes/staticfiles
healthcheck:
test: ["CMD", "wget", "http://127.0.0.1:8080/recipes", "-qO", "/dev/null"]
interval: 30s
retries: 10
profiles:
- tandoor
tandoor-nginx:
image: public.ecr.aws/nginx/nginx:mainline-alpine
container_name: tandoor-nginx
restart: always
env_file:
- ${CONFIG_ROOT:-.}/tandoor/.env
volumes:
- ${CONFIG_ROOT:-.}/tandoor/nginx:/etc/nginx/conf.d:ro
- ${CONFIG_ROOT:-.}/tandoor/mediafiles:/media:ro
- tandoor-staticfiles:/static:ro
healthcheck:
test: ["CMD", "wget", "http://127.0.0.1/recipes", "-qO", "/dev/null"]
interval: 30s
retries: 10
depends_on:
- tandoor
labels:
- traefik.enable=true
- traefik.http.routers.tandoor.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/recipes`))
- traefik.http.routers.tandoor.tls=true
- traefik.http.routers.tandoor.tls.certresolver=myresolver
- traefik.http.services.tandoor.loadbalancer.server.port=80
- homepage.group=Apps
- homepage.name=Tandoor
- homepage.icon=tandoor-recipes.png
- homepage.href=/recipes
- homepage.description=Smart recipe management
- homepage.weight=1
profiles:
- tandoor
tandoor-backup:
image: ghcr.io/adrienpoupa/rclone-backup:latest
container_name: tandoor-backup
restart: always
env_file:
- ${CONFIG_ROOT:-.}/tandoor/backup.env
environment:
- BACKUP_FOLDER_NAME=mediafiles
- BACKUP_FOLDER_PATH=/data/mediafiles
- DB_TYPE=sqlite
- SQLITE_DATABASE=/database/recipes.db
volumes:
- ${CONFIG_ROOT:-.}/tandoor/database:/database
- ${CONFIG_ROOT:-.}/tandoor/mediafiles:/data/mediafiles
- ${CONFIG_ROOT:-.}/tandoor/backup:/config
profiles:
- tandoor
volumes:
tandoor-staticfiles:

View File

@ -0,0 +1,20 @@
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Simple HttpErrorPages | MIT License | https://github.com/HttpErrorPages -->
<meta charset="utf-8" /><meta http-equiv="X-UA-Compatible" content="IE=edge" /><meta name="viewport" content="width=device-width, initial-scale=1" />
<title>502 - Webservice currently unavailable</title>
<style type="text/css">/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;line-height:1.15;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,footer,header,nav,section{display:block}h1{font-size:2em;margin:.67em 0}figcaption,figure,main{display:block}figure{margin:1em 40px}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent;-webkit-text-decoration-skip:objects}a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:inherit}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}dfn{font-style:italic}mark{background-color:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}img{border-style:none}svg:not(:root){overflow:hidden}button,input,optgroup,select,textarea{font-family:sans-serif;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html [type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em 
.75em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{display:inline-block;vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details,menu{display:block}summary{display:list-item}canvas{display:inline-block}template{display:none}[hidden]{display:none}/*! Simple HttpErrorPages | MIT X11 License | https://github.com/AndiDittrich/HttpErrorPages */body,html{width:100%;height:100%;background-color:#21232a}body{color:#fff;text-align:center;text-shadow:0 2px 4px rgba(0,0,0,.5);padding:0;min-height:100%;-webkit-box-shadow:inset 0 0 100px rgba(0,0,0,.8);box-shadow:inset 0 0 100px rgba(0,0,0,.8);display:table;font-family:"Open Sans",Arial,sans-serif}h1{font-family:inherit;font-weight:500;line-height:1.1;color:inherit;font-size:36px}h1 small{font-size:68%;font-weight:400;line-height:1;color:#777}a{text-decoration:none;color:#fff;font-size:inherit;border-bottom:dotted 1px #707070}.lead{color:silver;font-size:21px;line-height:1.4}.cover{display:table-cell;vertical-align:middle;padding:0 20px}footer{position:fixed;width:100%;height:40px;left:0;bottom:0;color:#a0a0a0;font-size:14px}</style>
</head>
<body>
<div class="cover"><h1>Tandoor Recipes is not yet available <small>502</small></h1>
<p class="lead">
Services are still trying to start.<br>
Please allow up to 3 minutes after you started the application on your server.<br><br>
If this status persists, check the application or docker logs for further information.<br>
After checking and trying everything mentioned in the <a href="https://docs.tandoor.dev/" target="_blank">docs</a>, you can request help on the project's <a href="https://github.com/TandoorRecipes/recipes/issues/new?assignees=&amp;labels=setup+issue&amp;template=help_request.yml" target="_blank">GitHub</a> page.
</p>
</div>
</body>
</html>

View File

@ -0,0 +1,29 @@
server {
listen 80;
server_name localhost;
client_max_body_size 128M;
# serve media files
location /recipes/media/ {
alias /media/;
}
location /recipes/static/ {
alias /static/;
}
# pass requests for dynamic content to gunicorn
location / {
proxy_set_header Host $http_host;
proxy_pass http://tandoor:8080;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Script-Name /recipes;
proxy_cookie_path / /recipes;
error_page 502 /errors/http502.html;
}
location /recipes/errors/ {
alias /etc/nginx/conf.d/errorpages/;
internal;
}
}

49
update-config.sh Executable file
View File

@ -0,0 +1,49 @@
#!/bin/bash
# See https://stackoverflow.com/a/44864004 for the sed GNU/BSD compatible hack
# Configure a freshly-started *arr app (radarr/sonarr/lidarr/prowlarr):
# set its <UrlBase> so it works behind the reverse proxy, then copy the API
# key it generated into the top-level .env file.
# NOTE(review): relies on both $1 and the caller's global $container holding
# the same container name (true at the call sites below).
function update_arr_config {
echo "Updating ${container} configuration..."
# Wait for the app to write its config.xml on first boot.
until [ -f "${CONFIG_ROOT:-.}"/"$container"/config.xml ]; do sleep 1; done
# Fill the empty <UrlBase> with /<name>; the -i.bak + rm pair keeps sed GNU/BSD compatible.
sed -i.bak "s/<UrlBase><\/UrlBase>/<UrlBase>\/$1<\/UrlBase>/" "${CONFIG_ROOT:-.}"/"$container"/config.xml && rm "${CONFIG_ROOT:-.}"/"$container"/config.xml.bak
CONTAINER_NAME_UPPER=$(echo "$container" | tr '[:lower:]' '[:upper:]')
# Extract the <ApiKey> value from config.xml and write it into the
# matching <NAME>_API_KEY= line of .env.
sed -i.bak 's/^'"${CONTAINER_NAME_UPPER}"'_API_KEY=.*/'"${CONTAINER_NAME_UPPER}"'_API_KEY='"$(sed -n 's/.*<ApiKey>\(.*\)<\/ApiKey>.*/\1/p' "${CONFIG_ROOT:-.}"/"$container"/config.xml)"'/' .env && rm .env.bak
echo "Update of ${container} configuration complete, restarting..."
# Restart so the app picks up the new URL base.
docker compose restart "$container"
}
# Seed qBittorrent's WebUI password by appending a preset PBKDF2 hash line
# to qBittorrent.conf (presumably matching the default credentials from
# .env — TODO confirm the hash corresponds to QBITTORRENT_PASSWORD).
function update_qbittorrent_config {
echo "Updating ${container} configuration..."
# Stop the container before editing the file — note the stop/edit/start
# order here (unlike the restart used for the other apps).
docker compose stop "$container"
until [ -f "${CONFIG_ROOT:-.}"/"$container"/qBittorrent/qBittorrent.conf ]; do sleep 1; done
# Append a WebUI\Password_PBKDF2 line right after the WebUI\ServerDomains
# line; the -i.bak + rm pair keeps sed GNU/BSD compatible.
sed -i.bak '/WebUI\\ServerDomains=*/a WebUI\\Password_PBKDF2="@ByteArray(ARQ77eY1NUZaQsuDHbIMCA==:0WMRkYTUWVT9wVvdDtHAjU9b3b7uB8NR1Gur2hmQCvCDpm39Q+PsJRJPaCU51dEiz+dTzh8qbPsL8WkFljQYFQ==)"' "${CONFIG_ROOT:-.}"/"$container"/qBittorrent/qBittorrent.conf && rm "${CONFIG_ROOT:-.}"/"$container"/qBittorrent/qBittorrent.conf.bak
echo "Update of ${container} configuration complete, restarting..."
docker compose start "$container"
}
# Configure Bazarr: set its base_url for the reverse proxy, enable the
# Sonarr/Radarr integrations, wire in their API keys and hostnames, and
# copy Bazarr's own API key into the top-level .env file.
function update_bazarr_config {
echo "Updating ${container} configuration..."
# Wait for Bazarr to write its config.yaml on first boot.
until [ -f "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml ]; do sleep 1; done
# Serve Bazarr under /<name> behind the proxy; -i.bak + rm keeps sed GNU/BSD compatible.
sed -i.bak "s/base_url: ''/base_url: '\/$container'/" "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml && rm "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml.bak
# Turn on both the Radarr and Sonarr integrations.
sed -i.bak "s/use_radarr: false/use_radarr: true/" "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml && rm "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml.bak
sed -i.bak "s/use_sonarr: false/use_sonarr: true/" "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml && rm "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml.bak
# Pull Sonarr's generated API key and fill the sonarr: section of Bazarr's
# config (the sed range ends at the next top-level "radarr:" line).
until [ -f "${CONFIG_ROOT:-.}"/sonarr/config.xml ]; do sleep 1; done
SONARR_API_KEY=$(sed -n 's/.*<ApiKey>\(.*\)<\/ApiKey>.*/\1/p' "${CONFIG_ROOT:-.}"/sonarr/config.xml)
sed -i.bak "/sonarr:/,/^radarr:/ { s/apikey: .*/apikey: $SONARR_API_KEY/; s/base_url: .*/base_url: \/sonarr/; s/ip: .*/ip: sonarr/ }" "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml && rm "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml.bak
# Same for Radarr (range ends at the next top-level "sonarr:" line).
until [ -f "${CONFIG_ROOT:-.}"/radarr/config.xml ]; do sleep 1; done
RADARR_API_KEY=$(sed -n 's/.*<ApiKey>\(.*\)<\/ApiKey>.*/\1/p' "${CONFIG_ROOT:-.}"/radarr/config.xml)
sed -i.bak "/radarr:/,/^sonarr:/ { s/apikey: .*/apikey: $RADARR_API_KEY/; s/base_url: .*/base_url: \/radarr/; s/ip: .*/ip: radarr/ }" "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml && rm "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml.bak
# Copy Bazarr's own apikey (first match in the yaml) into .env.
sed -i.bak 's/^BAZARR_API_KEY=.*/BAZARR_API_KEY='"$(sed -n 's/.*apikey: \(.*\)*/\1/p' "${CONFIG_ROOT:-.}"/"$container"/config/config/config.yaml | head -n 1)"'/' .env && rm .env.bak
echo "Update of ${container} configuration complete, restarting..."
# Restart so Bazarr picks up the edited config.
docker compose restart "$container"
}
# Apply post-start configuration to whichever supported containers are
# currently running. Read names line-by-line instead of word-splitting a
# command substitution (safe even if names ever contain glob characters).
while IFS= read -r container; do
  case "$container" in
    radarr|sonarr|lidarr|prowlarr)
      update_arr_config "$container"
      ;;
    bazarr)
      update_bazarr_config "$container"
      ;;
    qbittorrent)
      update_qbittorrent_config "$container"
      ;;
  esac
done < <(docker ps --format '{{.Names}}')