Initial commit of Docker files

This commit is contained in:
2025-08-18 00:26:57 +12:00
commit 05297cf246
29 changed files with 2517 additions and 0 deletions

76
.env.template Executable file
View File

@@ -0,0 +1,76 @@
# ===========================================
# DOCKER COMPOSE ENVIRONMENT CONFIGURATION
# ===========================================
# Copy this file to .env and fill in your values
# DO NOT commit .env to version control
# ===== SYSTEM CONFIGURATION =====
# IANA timezone name, passed to containers as TZ
TZ=Pacific/Auckland
# UID/GID the containers run as; match the host user that owns the data dirs
PUID=1000
PGID=1000
# ===== DOMAINS & URLS =====
# Base domain; the service URLs below are derived from it
DOMAIN=kansaigaijin.com
JELLYFIN_URL=https://jellyfin.${DOMAIN}
SCROBBLE_URL=https://scrobble.${DOMAIN}
MALOJA_URL=https://maloja.${DOMAIN}
PAPERLESS_URL=https://paperless.${DOMAIN}
PANGOLIN_URL=https://png.${DOMAIN}
# ===== DIRECTORY PATHS =====
# Media storage root on the host
DATA_ROOT=/data
# Where per-service config dirs live (relative to the compose file)
DOCKER_CONFIG_ROOT=../docker-local
TORRENTS_PATH=${DATA_ROOT}/torrents
# ===== DATABASE CREDENTIALS =====
# Speedtest Tracker Database
SPEEDTEST_DB_NAME=speedtest_tracker
SPEEDTEST_DB_USER=speedtest_tracker
SPEEDTEST_DB_PASSWORD=CHANGE_ME_SPEEDTEST_DB_PASSWORD
# Paperless Database
PAPERLESS_DB_NAME=paperless
PAPERLESS_DB_USER=paperless
PAPERLESS_DB_PASSWORD=CHANGE_ME_PAPERLESS_DB_PASSWORD
PAPERLESS_DB_ROOT_PASSWORD=CHANGE_ME_PAPERLESS_ROOT_PASSWORD
# ===== APPLICATION SECRETS =====
# Maloja
MALOJA_FORCE_PASSWORD=CHANGE_ME_MALOJA_PASSWORD
# Speedtest Tracker
# Presumably a Laravel APP_KEY in "base64:..." form — confirm the expected
# format against the Speedtest Tracker documentation
SPEEDTEST_APP_KEY=CHANGE_ME_BASE64_APP_KEY
# Spotify Integration
SPOTIFY_CLIENT_ID=CHANGE_ME_SPOTIFY_CLIENT_ID
SPOTIFY_CLIENT_SECRET=CHANGE_ME_SPOTIFY_CLIENT_SECRET
SPOTIFY_REDIRECT_URI=${SCROBBLE_URL}/callback
# Maloja API
MALOJA_API_KEY=CHANGE_ME_MALOJA_API_KEY
# Newt Service
NEWT_ID=CHANGE_ME_NEWT_ID
NEWT_SECRET=CHANGE_ME_NEWT_SECRET
# ===== OPTIONAL CONFIGURATIONS =====
# Homepage
HOMEPAGE_ALLOWED_HOSTS=${DOMAIN}
# Speedtest
SPEEDTEST_SERVERS=7317
# Cron-style schedule: minute 30 of every hour
SPEEDTEST_SCHEDULE=30 * * * *
# Docker Proxy (security settings) — 1 exposes that API section, 0 disables it
DOCKER_PROXY_CONTAINERS=1
DOCKER_PROXY_SERVICES=1
DOCKER_PROXY_TASKS=1
DOCKER_PROXY_POST=0
# Watchtower
WATCHTOWER_CLEANUP=true
# Poll interval in seconds (86400 = 24 h)
WATCHTOWER_POLL_INTERVAL=86400
# ===== PROFILE CONTROL =====
# Uncomment to enable specific service groups
# COMPOSE_PROFILES=media,utilities,documents,monitoring

103
.gitignore vendored Executable file
View File

@@ -0,0 +1,103 @@
# ===========================================
# DOCKER COMPOSE SECURITY & DATA FILES
# ===========================================
# Environment files with secrets
.env
.env.local
.env.production
.env.development
# Service configuration and data directories
# (Keeps service folders but ignores data/config inside them)
# FIX(review): a single '*' in a .gitignore pattern never matches across '/',
# so the previous '*/config/' rules only covered top-level directories and
# nested paths such as Arrs/Prowlarr/config/ were still committed — which is
# how live API keys ended up in this repo (rotate them). '**/' matches at any
# depth.
**/config/
**/data/
**/logs/
**/cache/
**/db/
**/media/
# Specific service data patterns (now redundant with the ** rules above,
# kept for documentation)
qBittorrent/config/
Homepage/config/
Homepage/data/
filebrowser/config/
filebrowser/data/
maloja/config/
maloja/data/
maloja/logs/
scrobble/config/
scrobble/data/
stirling/*/config/
stirling/*/data/
stirling/*/logs/
paperless/data/
paperless/media/
paperless/export/
paperless/consume/
gramps/gramps_*/
obsidian/vaults/
obsidian/config/
syncthing/
rustdesk/data/
racknerd-converter/data/
racknerd-converter/logs/
# Docker volumes and bind mounts from external paths
# NOTE(review): '../docker-local/' cannot match anything — gitignore paths are
# resolved relative to the repo; kept only as documentation of intent.
../docker-local/
docker-local/
# Database files
*.db
*.sqlite
*.sqlite3
# Log files
*.log
logs/
# Temporary files
tmp/
temp/
*.tmp
# Backup files
*.bak
*.backup
*~
# OS specific files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
desktop.ini
# IDE files
.vscode/
.idea/
*.swp
*.swo
# Docker specific
.dockerignore
docker-compose.override.yml
# SSL certificates and keys
*.pem
*.key
*.crt
*.p12
*.pfx
# SSH keys
id_rsa
id_rsa.pub
known_hosts
# Runtime files
*.pid
*.sock

View File

@@ -0,0 +1 @@
4964985276.1738142532

View File

@@ -0,0 +1,28 @@
{
"data": [
{
"text": "Welcome to the new announcement section. It will keep you up-to-date with important news. You can dismiss this announcement by clicking on the button at the end of this line.",
"link": "",
"hash": "",
"dismissible": true,
"timestamp": 1676235999,
"enabled": true
},
{
"text": "Opensubtitles.org is now only accessible to VIP users. If you're still using it and do not plan to get a VIP subscription, you should consider disabling it and move to opensubtitles.com.",
"link": "",
"hash": "",
"dismissible": true,
"timestamp": 1700791126,
"enabled": true
},
{
"text": "We're about to drop support for legacy Sonarr and Radarr versions (prior to v3). If you're still using those legacy versions, please consider upgrading ASAP.",
"link": "",
"hash": "",
"dismissible": true,
"timestamp": 1731247748,
"enabled": true
}
]
}

View File

@@ -0,0 +1,274 @@
---
addic7ed:
cookies: ''
password: ''
user_agent: ''
username: ''
vip: false
analytics:
enabled: true
anidb:
api_client: ''
api_client_ver: 1
animetosho:
anidb_api_client: ''
anidb_api_client_ver: 1
search_threshold: 6
anticaptcha:
anti_captcha_key: ''
assrt:
token: ''
auth:
# NOTE(review): live API key committed to version control — rotate it in the
# Bazarr UI and keep this generated config file out of git
apikey: f4e633d900a236934a951cede0e9176a
password: ''
type: null
username: ''
avistaz:
cookies: ''
user_agent: ''
backup:
day: 6
folder: /config/backup
frequency: Weekly
hour: 3
retention: 31
betaseries:
token: ''
cinemaz:
cookies: ''
user_agent: ''
cors:
enabled: false
deathbycaptcha:
password: ''
username: ''
embeddedsubtitles:
fallback_lang: en
hi_fallback: false
included_codecs: []
timeout: 600
unknown_as_fallback: false
general:
adaptive_searching: true
adaptive_searching_delay: 3w
adaptive_searching_delta: 1w
anti_captcha_provider: null
auto_update: true
base_url: ''
branch: master
chmod: '0640'
chmod_enabled: false
days_to_upgrade_subs: 7
debug: false
default_und_audio_lang: ''
default_und_embedded_subtitles_lang: ''
dont_notify_manual_actions: false
embedded_subs_show_desired: true
embedded_subtitles_parser: ffprobe
enabled_integrations: []
enabled_providers: []
# NOTE(review): secret committed to version control — regenerate it and keep
# this file out of git
flask_secret_key: 2cea2a0dfa96a753c70ac9b5c6160ccb
hi_extension: hi
ignore_ass_subs: false
ignore_pgs_subs: false
ignore_vobsub_subs: false
ip: '*'
language_equals: []
minimum_score: 90
minimum_score_movie: 70
movie_default_enabled: false
movie_default_profile: ''
movie_tag_enabled: false
multithreading: true
page_size: 25
parse_embedded_audio_track: false
path_mappings: []
path_mappings_movie: []
port: 6767
postprocessing_cmd: ''
postprocessing_threshold: 90
postprocessing_threshold_movie: 70
remove_profile_tags: []
serie_default_enabled: false
serie_default_profile: ''
serie_tag_enabled: false
single_language: false
skip_hashing: false
subfolder: current
subfolder_custom: ''
subzero_mods: ''
theme: auto
upgrade_frequency: 12
upgrade_manual: true
upgrade_subs: true
use_embedded_subs: true
use_postprocessing: false
use_postprocessing_threshold: false
use_postprocessing_threshold_movie: false
use_radarr: false
use_scenename: true
use_sonarr: false
utf8_encode: true
wanted_search_frequency: 6
wanted_search_frequency_movie: 6
hdbits:
passkey: ''
username: ''
jimaku:
api_key: ''
enable_ai_subs: false
enable_archives_download: false
enable_name_search_fallback: true
karagarga:
f_password: ''
f_username: ''
password: ''
username: ''
ktuvit:
email: ''
hashed_password: ''
legendasdivx:
password: ''
skip_wrong_fps: false
username: ''
legendasnet:
password: ''
username: ''
log:
exclude_filter: ''
ignore_case: false
include_filter: ''
use_regex: false
movie_scores:
audio_codec: 3
edition: 1
hash: 119
hearing_impaired: 1
release_group: 13
resolution: 2
source: 7
streaming_service: 1
title: 60
video_codec: 2
year: 30
napiprojekt:
only_authors: false
only_real_names: false
napisy24:
password: ''
username: ''
opensubtitles:
password: ''
skip_wrong_fps: false
ssl: false
timeout: 15
use_tag_search: false
username: ''
vip: false
opensubtitlescom:
include_ai_translated: false
password: ''
use_hash: true
username: ''
podnapisi:
verify_ssl: true
postgresql:
database: ''
enabled: false
host: localhost
password: ''
port: 5432
username: ''
proxy:
exclude:
- localhost
- 127.0.0.1
password: ''
port: ''
type: null
url: ''
username: ''
radarr:
apikey: ''
base_url: /
defer_search_signalr: false
excluded_tags: []
full_update: Daily
full_update_day: 6
full_update_hour: 4
http_timeout: 60
ip: 127.0.0.1
movies_sync: 60
only_monitored: false
port: 7878
ssl: false
sync_only_monitored_movies: false
use_ffprobe_cache: true
series_scores:
audio_codec: 3
episode: 30
hash: 359
hearing_impaired: 1
release_group: 14
resolution: 2
season: 30
series: 180
source: 7
streaming_service: 1
video_codec: 2
year: 90
sonarr:
apikey: ''
base_url: /
defer_search_signalr: false
exclude_season_zero: false
excluded_series_types: []
excluded_tags: []
full_update: Daily
full_update_day: 6
full_update_hour: 4
http_timeout: 60
ip: 127.0.0.1
only_monitored: false
port: 8989
series_sync: 60
ssl: false
sync_only_monitored_episodes: false
sync_only_monitored_series: false
use_ffprobe_cache: true
subdl:
api_key: ''
subf2m:
user_agent: ''
verify_ssl: true
subsync:
checker:
blacklisted_languages: []
blacklisted_providers: []
debug: false
force_audio: false
gss: true
max_offset_seconds: 60
no_fix_framerate: true
subsync_movie_threshold: 70
subsync_threshold: 90
use_subsync: false
use_subsync_movie_threshold: false
use_subsync_threshold: false
titlovi:
password: ''
username: ''
titulky:
approved_only: false
password: ''
skip_wrong_fps: false
username: ''
whisperai:
endpoint: http://127.0.0.1:9000
loglevel: INFO
pass_video_name: false
response: 5
timeout: 3600
xsubs:
password: ''
username: ''

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="utf-8"?>
<key id="076c8fc0-d072-456f-a8c8-b5d4722d497d" version="1">
<creationDate>2025-01-29T09:22:08.4496837Z</creationDate>
<activationDate>2025-01-29T09:22:08.4381256Z</activationDate>
<expirationDate>2025-04-29T09:22:08.4381256Z</expirationDate>
<descriptor deserializerType="Microsoft.AspNetCore.DataProtection.AuthenticatedEncryption.ConfigurationModel.AuthenticatedEncryptorDescriptorDeserializer, Microsoft.AspNetCore.DataProtection, Version=6.0.0.0, Culture=neutral, PublicKeyToken=adb9793829ddae60">
<descriptor>
<encryption algorithm="AES_256_CBC" />
<validation algorithm="HMACSHA256" />
<masterKey p4:requiresEncryption="true" xmlns:p4="http://schemas.asp.net/2015/03/dataProtection">
<!-- Warning: the key below is in an unencrypted form. -->
<value>0i5M9BhZYxyRZUnt26LMNZ4ukvPyWz1P2CVr4iqIfZFTBANQrCeRHQL9RXc0dbhU7Gi2ECvniDLHgNUfcvZfaA==</value>
</masterKey>
</descriptor>
</descriptor>
</key>

16
Arrs/Prowlarr/config/config.xml Executable file
View File

@@ -0,0 +1,16 @@
<!--
  Prowlarr runtime configuration (managed by the application itself).
  NOTE(review): this file was committed with a live ApiKey — rotate the key
  and keep generated config out of version control.
  NOTE(review): AuthenticationMethod is None while AuthenticationRequired is
  Enabled — presumably the web UI is reachable without login; confirm it is
  only exposed on a trusted network. LogLevel 'debug' is verbose for
  steady-state use.
-->
<Config>
  <BindAddress>*</BindAddress>
  <Port>9696</Port>
  <SslPort>6969</SslPort>
  <EnableSsl>False</EnableSsl>
  <LaunchBrowser>True</LaunchBrowser>
  <ApiKey>2e061a290fb24d8792ebb2d70b15bf67</ApiKey>
  <AuthenticationMethod>None</AuthenticationMethod>
  <AuthenticationRequired>Enabled</AuthenticationRequired>
  <Branch>master</Branch>
  <LogLevel>debug</LogLevel>
  <SslCertPath></SslCertPath>
  <SslCertPassword></SslCertPassword>
  <UrlBase></UrlBase>
  <InstanceName>Prowlarr</InstanceName>
</Config>

View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="utf-8"?>
<key id="4bdf6bfd-92c6-4723-8e40-a1abcb19db17" version="1">
<creationDate>2025-01-29T09:22:08.5915717Z</creationDate>
<activationDate>2025-01-29T09:22:08.5787744Z</activationDate>
<expirationDate>2025-04-29T09:22:08.5787744Z</expirationDate>
<descriptor deserializerType="Microsoft.AspNetCore.DataProtection.AuthenticatedEncryption.ConfigurationModel.AuthenticatedEncryptorDescriptorDeserializer, Microsoft.AspNetCore.DataProtection, Version=6.0.0.0, Culture=neutral, PublicKeyToken=adb9793829ddae60">
<descriptor>
<encryption algorithm="AES_256_CBC" />
<validation algorithm="HMACSHA256" />
<masterKey p4:requiresEncryption="true" xmlns:p4="http://schemas.asp.net/2015/03/dataProtection">
<!-- Warning: the key below is in an unencrypted form. -->
<value>u+w7PRy5v7u9F6/9RnBwbm0FeKyLyyR9/PPgCKe9Hvgapo2NBpjXgQOUO+mXRsYeTM8aL9sAMd80BJPAeQWZPA==</value>
</masterKey>
</descriptor>
</descriptor>
</key>

16
Arrs/Radarr/config/config.xml Executable file
View File

@@ -0,0 +1,16 @@
<!--
  Radarr runtime configuration (managed by the application itself).
  NOTE(review): committed with a live ApiKey — rotate the key and keep
  generated config out of version control.
  NOTE(review): AuthenticationMethod None with AuthenticationRequired Enabled —
  presumably the UI is reachable without login; confirm network exposure.
  LogLevel 'debug' is verbose for steady-state use.
-->
<Config>
  <BindAddress>*</BindAddress>
  <Port>7878</Port>
  <SslPort>9898</SslPort>
  <EnableSsl>False</EnableSsl>
  <LaunchBrowser>True</LaunchBrowser>
  <ApiKey>6c4a2d0e6cd547ad9681d428786d45f7</ApiKey>
  <AuthenticationMethod>None</AuthenticationMethod>
  <AuthenticationRequired>Enabled</AuthenticationRequired>
  <Branch>master</Branch>
  <LogLevel>debug</LogLevel>
  <SslCertPath></SslCertPath>
  <SslCertPassword></SslCertPassword>
  <UrlBase></UrlBase>
  <InstanceName>Radarr</InstanceName>
</Config>

View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="utf-8"?>
<key id="7fe5740c-65be-4e37-9015-402fa9a48df0" version="1">
<creationDate>2025-01-29T09:22:08.4520862Z</creationDate>
<activationDate>2025-01-29T09:22:08.440317Z</activationDate>
<expirationDate>2025-04-29T09:22:08.440317Z</expirationDate>
<descriptor deserializerType="Microsoft.AspNetCore.DataProtection.AuthenticatedEncryption.ConfigurationModel.AuthenticatedEncryptorDescriptorDeserializer, Microsoft.AspNetCore.DataProtection, Version=6.0.0.0, Culture=neutral, PublicKeyToken=adb9793829ddae60">
<descriptor>
<encryption algorithm="AES_256_CBC" />
<validation algorithm="HMACSHA256" />
<masterKey p4:requiresEncryption="true" xmlns:p4="http://schemas.asp.net/2015/03/dataProtection">
<!-- Warning: the key below is in an unencrypted form. -->
<value>C1ExEujkCoR0uN5myc8nRpZboUVQDdEeu2EkGqISG1/m8hC/LiDeQj7GMEIZFTjxRUSB4JSfU2CvjsvYGGEt4A==</value>
</masterKey>
</descriptor>
</descriptor>
</key>

16
Arrs/Sonarr/config/config.xml Executable file
View File

@@ -0,0 +1,16 @@
<!--
  Sonarr runtime configuration (managed by the application itself).
  NOTE(review): committed with a live ApiKey — rotate the key and keep
  generated config out of version control.
  NOTE(review): AuthenticationMethod None with AuthenticationRequired Enabled —
  presumably the UI is reachable without login; confirm network exposure.
  LogLevel 'trace' is extremely verbose; consider 'info' once debugging ends.
-->
<Config>
  <BindAddress>*</BindAddress>
  <Port>8989</Port>
  <SslPort>9898</SslPort>
  <EnableSsl>False</EnableSsl>
  <LaunchBrowser>True</LaunchBrowser>
  <ApiKey>c9dba8cdc00741f299e96c518372118f</ApiKey>
  <AuthenticationMethod>None</AuthenticationMethod>
  <AuthenticationRequired>Enabled</AuthenticationRequired>
  <Branch>main</Branch>
  <LogLevel>trace</LogLevel>
  <SslCertPath></SslCertPath>
  <SslCertPassword></SslCertPassword>
  <UrlBase></UrlBase>
  <InstanceName>Sonarr</InstanceName>
</Config>

204
README.md Executable file
View File

@@ -0,0 +1,204 @@
# Docker Media & Utility Stack
A comprehensive Docker Compose setup for media management, document processing, and various utility services.
## 🚀 Quick Start
```bash
# Clone the repository
git clone <your-repo-url>
cd <repo-name>
# Start all services
docker compose up -d
# Or start specific service groups
docker compose --profile media up -d
```
## 📋 Services Overview
### 🎬 Media Stack (`media` profile)
- **Sonarr** (8989) - TV series management
- **Radarr** (7878) - Movie management
- **Lidarr** (8686) - Music management
- **Bazarr** (6767) - Subtitle management
- **Prowlarr** (9696) - Indexer management
- **FlareSolverr** (8191) - Cloudflare bypass
- **Jellyfin** (8096) - Media streaming server
- **Jellyseerr** (5055) - Media request management
- **qBittorrent** (7070) - Download client
- **Multi-Scrobbler** (9078) - Music scrobbling
- **Maloja** (42010) - Music statistics
### 🛠️ Utilities (`utilities` profile)
- **Homepage** (7575) - Service dashboard
- **FileBrowser** (6633) - Web file manager
- **Syncthing** (8384) - File synchronization
- **Obsidian Remote** (8181) - Note-taking
- **Stirling PDF** (8090) - PDF processing
- **RustDesk** - Remote desktop server
- **Gramps** (5511) - Genealogy management
- **Newt** - Notification service
- **RackNerd API Converter** (5000) - VPS monitoring API
### 📄 Documents (`documents` profile)
- **Paperless NGX** (8100) - Document management
- **Paperless AI** (3040) - AI document enhancement
- **Gotenberg** - Document conversion
- **Tika** - Content extraction
### 📊 Monitoring (`monitoring` profile)
- **Speedtest Tracker** (8180) - Network monitoring
- **Watchtower** - Container updates
## 🏗️ Architecture
The stack is organized into logical service groups:
```
├── docker-compose.yml # Main orchestration
├── compose/
│ ├── media-stack.yml # *arr services & Jellyfin
│ ├── utility-stack.yml # General utilities
│ ├── document-management.yml # Paperless stack
│ └── monitoring.yml # Monitoring services
├── .env # Environment variables
└── .env.template # Template for new deployments
```
## 🔧 Configuration
### Environment Variables
Key configuration is handled through `.env`:
- `DOMAIN` - Your domain name
- `TZ` - Timezone (Pacific/Auckland)
- `DATA_ROOT` - Media storage path (/data)
- Database credentials for each service
### Service Profiles
Control which services start:
```bash
# Start only media services
export COMPOSE_PROFILES=media
docker compose up -d
# Start multiple profiles
export COMPOSE_PROFILES=media,utilities
docker compose up -d
# Start everything
export COMPOSE_PROFILES=all
docker compose up -d
```
## 📁 Directory Structure
Ensure these directories exist:
```
/data/ # Media storage
├── movies/
├── tv/
├── music/
└── torrents/
../docker-local/ # Container configs
├── Arrs/
│ ├── Sonarr/config/
│ ├── Radarr/config/
│ └── ...
└── [service-name]/config/
```
## 🔒 Security Features
- **Secrets in `.env`** - Sensitive data is intended to live in `.env` (note: some bundled compose files still hardcode credentials and should be migrated)
- **Least privilege** - `no-new-privileges` security options
- **Network isolation** - Separate networks for different stacks
- **Health checks** - Automatic service monitoring
- **Read-only mounts** - Docker socket proxy with restricted access
## 🌐 Network Configuration
- **arr_network** (172.20.0.0/16) - Media services
- **database_network** (172.21.0.0/16) - Database services
Static IPs assigned for reliable service discovery.
## 📝 Service URLs
Once running, access services at:
- **Homepage**: http://localhost:7575
- **Jellyfin**: http://localhost:8096
- **Sonarr**: http://localhost:8989
- **Radarr**: http://localhost:7878
- **qBittorrent**: http://localhost:7070
- **Paperless**: http://localhost:8100
- **FileBrowser**: http://localhost:6633
- **RackNerd API**: http://localhost:5000
## 🔄 Management Commands
```bash
# View running services
docker compose ps
# View logs
docker compose logs [service-name]
# Update a service
docker compose pull [service-name]
docker compose up -d [service-name]
# Stop all services
docker compose down
# Stop and remove volumes (⚠️ DATA LOSS)
docker compose down -v
```
## 🚨 Troubleshooting
### Common Issues
1. **Port conflicts**: Check if ports are already in use
2. **Permission issues**: Ensure PUID/PGID match your user
3. **Volume mounts**: Verify directory paths exist
4. **Network issues**: Check firewall settings
### Health Checks
Most services include health checks. View status:
```bash
docker compose ps
# Shows health status for each service
```
## 🔐 Security Notes
- `.env` contains sensitive data - keep it secure
- Consider using Docker secrets for production
- Regular updates via Watchtower
- Monitor access logs
## 🤝 Contributing
1. Follow the existing structure when adding services
2. Add health checks to new services
3. Use environment variables for configuration
4. Update documentation for new services
## 📄 License
[Add your license here]
---
**⚠️ Important**: This setup includes production credentials. Ensure `.env` is never committed to version control.

View File

@@ -0,0 +1,54 @@
# AdventureLog stack: SvelteKit frontend, PostGIS database, Django backend.
services:
  web:
    #build: ./frontend/
    image: ghcr.io/seanmorley15/adventurelog-frontend:latest
    container_name: adventurelog-frontend
    restart: unless-stopped
    environment:
      - PUBLIC_SERVER_URL=http://server:8000 # Should be the service name of the backend with port 8000, even if you change the port in the backend service
      - ORIGIN=https://adventure.kansaigaijin.com
      - BODY_SIZE_LIMIT=Infinity
    ports:
      - "8015:3000"
    depends_on:
      - server
  db:
    image: postgis/postgis:15-3.3
    container_name: adventurelog-db
    restart: unless-stopped
    environment:
      POSTGRES_DB: database
      POSTGRES_USER: adventure
      # SECURITY(review): credential hardcoded and committed to VCS — rotate it
      # and move it to a ${...} variable sourced from .env, as the other stacks
      # in this repo do.
      POSTGRES_PASSWORD: Figureitout0313
    volumes:
      - postgres_data:/var/lib/postgresql/data/
  server:
    #build: ./backend/
    image: ghcr.io/seanmorley15/adventurelog-backend:latest
    container_name: adventurelog-backend
    restart: unless-stopped
    environment:
      - PGHOST=db
      - PGDATABASE=database
      - PGUSER=adventure
      # SECURITY(review): same hardcoded password as the db service — rotate
      # and move to .env.
      - PGPASSWORD=Figureitout0313
      # SECURITY(review): Django SECRET_KEY reuses the DB password and is
      # committed — generate a fresh random key.
      - SECRET_KEY=Figureitout0313
      - DJANGO_ADMIN_USERNAME=admin
      # SECURITY(review): default admin/admin credentials — change before
      # exposing this publicly.
      - DJANGO_ADMIN_PASSWORD=admin
      - DJANGO_ADMIN_EMAIL=jamie@kansaigaijin.com
      - PUBLIC_URL=https://adventureadmin.kansaigaijin.com # Match the outward port, used for the creation of image urls
      - CSRF_TRUSTED_ORIGINS=http://localhost:8016,http://localhost:8015,https://adventure.kansaigaijin.com,https://adventureadmin.kansaigaijin.com # Comma separated list of trusted origins for CSRF
      - DEBUG=False
      - FRONTEND_URL=https://adventure.kansaigaijin.com # Used for email generation. This should be the url of the frontend
    ports:
      - "8016:80"
    depends_on:
      - db
    volumes:
      - adventurelog_media:/code/media/
volumes:
  postgres_data:
  adventurelog_media:

150
compose/document-management.yml Executable file
View File

@@ -0,0 +1,150 @@
# Document Management Stack - Paperless NGX
# Broker (Redis), converters (Gotenberg/Tika), MariaDB, webserver, AI add-on.
services:
  # ===== PAPERLESS INFRASTRUCTURE =====
  paperless-broker:
    container_name: paperless-broker
    image: redis:8-alpine
    profiles: ["documents", "all"]
    restart: unless-stopped
    volumes:
      - redisdata:/data
    networks:
      - database_network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
    security_opt:
      - no-new-privileges:true

  # ===== DOCUMENT PROCESSING SERVICES =====
  paperless-gotenberg:
    container_name: paperless-gotenberg
    image: gotenberg/gotenberg:8.20
    profiles: ["documents", "all"]
    restart: unless-stopped
    # Hardened flags: JavaScript disabled, Chromium may only read /tmp files
    command:
      - "gotenberg"
      - "--chromium-disable-javascript=true"
      - "--chromium-allow-list=file:///tmp/.*"
    networks:
      - database_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  paperless-tika:
    container_name: paperless-tika
    image: apache/tika:latest
    profiles: ["documents", "all"]
    restart: unless-stopped
    networks:
      - database_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9998/version"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===== DATABASE =====
  # FIX(review): this service was previously declared AFTER the top-level
  # `networks:`/`volumes:` sections, i.e. outside `services:`, which Compose
  # rejects as an unknown top-level key. It now lives inside `services:`.
  paperless-db:
    container_name: paperless-db
    image: mariadb:11
    profiles: ["documents", "all"]
    restart: unless-stopped
    volumes:
      - dbdata:/var/lib/mysql
    environment:
      # NOTE(review): MARIADB_HOST is not a documented mariadb-image variable —
      # presumably a leftover; confirm and remove if unused.
      - MARIADB_HOST=paperless
      - MARIADB_DATABASE=${PAPERLESS_DB_NAME}
      - MARIADB_USER=${PAPERLESS_DB_USER}
      - MARIADB_PASSWORD=${PAPERLESS_DB_PASSWORD}
      - MARIADB_ROOT_PASSWORD=${PAPERLESS_DB_ROOT_PASSWORD}
    networks:
      - database_network
    healthcheck:
      # $$ escapes to a literal $ so the variable expands inside the container
      test: ["CMD", "mariadb-admin", "ping", "-h", "localhost", "-u", "root", "-p$$MARIADB_ROOT_PASSWORD"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s
    security_opt:
      - no-new-privileges:true

  # ===== MAIN PAPERLESS APPLICATION =====
  paperless-webserver:
    container_name: paperless-webserver
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    profiles: ["documents", "all"]
    restart: unless-stopped
    depends_on:
      paperless-db:
        condition: service_healthy
      paperless-broker:
        condition: service_started
      paperless-gotenberg:
        condition: service_started
      paperless-tika:
        condition: service_started
    ports:
      - "8100:8000"
    volumes:
      - ./paperless/data:/usr/src/paperless/data
      - ./paperless/media:/usr/src/paperless/media
      - ./paperless/export:/usr/src/paperless/export
      - ./paperless/consume:/usr/src/paperless/consume
    env_file: ./paperless/docker-compose.env
    environment:
      - PAPERLESS_REDIS=redis://paperless-broker:6379
      - PAPERLESS_DBENGINE=mariadb
      - PAPERLESS_DBHOST=paperless-db
      - PAPERLESS_DBUSER=${PAPERLESS_DB_USER}
      - PAPERLESS_DBPASS=${PAPERLESS_DB_PASSWORD}
      - PAPERLESS_DBPORT=3306
      - PAPERLESS_URL=${PAPERLESS_URL}
      - PAPERLESS_TIKA_ENABLED=1
      - PAPERLESS_TIKA_GOTENBERG_ENDPOINT=http://paperless-gotenberg:3000
      - PAPERLESS_TIKA_ENDPOINT=http://paperless-tika:9998
    networks:
      - database_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ===== AI ENHANCEMENT =====
  paperless-ai:
    image: clusterzx/paperless-ai:latest
    container_name: paperless-ai
    profiles: ["documents", "all"]
    restart: unless-stopped
    depends_on:
      - paperless-webserver
    cap_drop:
      - ALL
    security_opt:
      - no-new-privileges:true
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      # NOTE(review): localhost here resolves to the paperless-ai container
      # itself; if the RAG service is meant to be Paperless, this should
      # probably be http://paperless-webserver:8000 — confirm.
      - RAG_SERVICE_URL=http://localhost:8000
      - RAG_SERVICE_ENABLED=true
    ports:
      - "3040:3000"
    volumes:
      - aidata:/app/data

networks:
  database_network:
    external: true

volumes:
  redisdata:
    external: true
  dbdata:
    external: true
  aidata:
    external: true

253
compose/media-stack.yml Executable file
View File

@@ -0,0 +1,253 @@
# Media Stack - *arr services, Jellyfin, torrent client
# Services opt in via the "media" (or "all") compose profile; most attach to
# the external arr_network with static IPv4 addresses.
services:
  # ===== MEDIA MANAGEMENT =====
  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    profiles: ["media", "all"]
    environment:
      - TZ=${TZ}
      - PUID=${PUID}
      - PGID=${PGID}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Sonarr/config:/config
      - ${DATA_ROOT}:/data
      - ${TORRENTS_PATH}:/downloads
    ports:
      - "8989:8989"
    networks:
      arr_network:
        ipv4_address: 172.20.0.3
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8989/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    profiles: ["media", "all"]
    environment:
      - TZ=${TZ}
      - PUID=${PUID}
      - PGID=${PGID}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Radarr/config:/config
      - ${DATA_ROOT}:/data
      - ${TORRENTS_PATH}:/downloads
    ports:
      - "7878:7878"
    networks:
      arr_network:
        ipv4_address: 172.20.0.5
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7878/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    profiles: ["media", "all"]
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Lidarr/config:/config
      - ${DATA_ROOT}:/data
      - ${TORRENTS_PATH}:/downloads
    ports:
      - "8686:8686"
    restart: unless-stopped
    networks:
      arr_network:
        ipv4_address: 172.20.0.7
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8686/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    profiles: ["media", "all"]
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Bazarr/config:/config
      - ${DATA_ROOT}:/data
    networks:
      arr_network:
        ipv4_address: 172.20.0.6
    ports:
      - "6767:6767"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:6767/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    profiles: ["media", "all"]
    environment:
      - TZ=${TZ}
      - PUID=${PUID}
      - PGID=${PGID}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Prowlarr/config:/config
    ports:
      - "9696:9696"
    networks:
      arr_network:
        ipv4_address: 172.20.0.4
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9696/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    profiles: ["media", "all"]
    environment:
      - TZ=${TZ}
    ports:
      - "8191:8191"
    networks:
      arr_network:
        ipv4_address: 172.20.0.8
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8191/health"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== MEDIA STREAMING =====
  jellyfin:
    image: jellyfin/jellyfin:latest
    container_name: jellyfin
    profiles: ["media", "all"]
    ports:
      - "8096:8096"
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Jellyfin/config:/config
      - ${DOCKER_CONFIG_ROOT}/Arrs/Jellyfin/cache:/cache
      - ${DATA_ROOT}:/data
      # GPU device for hardware transcoding
      - /dev/dri/renderD128:/dev/dri/renderD128
    group_add:
      # NOTE(review): assumed to be the host's 'render' group GID — verify with
      # `getent group render`; the number varies between distros.
      - "104"
    restart: unless-stopped
    environment:
      - TZ=${TZ}
      - PUID=${PUID}
      - PGID=${PGID}
      - JELLYFIN_PublishedServerUrl=${JELLYFIN_URL}
    # NOTE(review): unlike the *arr services there is no networks: block here,
    # so jellyfin lands on the default bridge — confirm that is intentional.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8096/health"]
      interval: 30s
      timeout: 10s
      retries: 3
  jellyseerr:
    image: fallenbagel/jellyseerr:latest
    container_name: jellyseerr
    profiles: ["media", "all"]
    environment:
      - LOG_LEVEL=info
      - TZ=${TZ}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/Arrs/Jellyseerr/config:/app/config
      - ${TORRENTS_PATH}:/downloads
    ports:
      - "5055:5055"
    networks:
      arr_network:
        ipv4_address: 172.20.0.14
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5055/api/v1/status"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== DOWNLOAD CLIENT =====
  qbittorrent:
    container_name: qbittorrent
    image: lscr.io/linuxserver/qbittorrent:latest
    profiles: ["media", "all"]
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
      # Internal Web UI port; mapped to host 7070 below
      - WEBUI_PORT=8080
    ports:
      - "7070:8080"
      - "56881:6881"
      - "56881:6881/udp"
    volumes:
      # NOTE(review): relative ./qBittorrent path, unlike the other services
      # which use ${DOCKER_CONFIG_ROOT} — confirm this inconsistency is wanted.
      - ./qBittorrent/config:/config
      - ${TORRENTS_PATH}:/downloads
    networks:
      arr_network:
        ipv4_address: 172.20.0.2
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== MUSIC SCROBBLING =====
  multi-scrobbler:
    image: foxxmd/multi-scrobbler:latest
    container_name: multi-scrobbler
    profiles: ["media", "all"]
    environment:
      - TZ=${TZ}
      - PUID=${PUID}
      - PGID=${PGID}
      - BASE_URL=${SCROBBLE_URL}
      - SPOTIFY_CLIENT_ID=${SPOTIFY_CLIENT_ID}
      - SPOTIFY_CLIENT_SECRET=${SPOTIFY_CLIENT_SECRET}
      - SPOTIFY_REDIRECT_URI=${SPOTIFY_REDIRECT_URI}
      - MALOJA_URL=${MALOJA_URL}
      - MALOJA_API_KEY=${MALOJA_API_KEY}
    volumes:
      # NOTE(review): relative ./scrobble path — see qbittorrent note above.
      - "./scrobble/config:/config"
    ports:
      - "9078:9078"
    restart: unless-stopped
    depends_on:
      - maloja
  maloja:
    image: krateng/maloja:latest
    container_name: maloja
    profiles: ["media", "all"]
    ports:
      - "42010:42010"
    volumes:
      - "./maloja/config:/etc/maloja"
      - "./maloja/data:/var/lib/maloja"
      - "./maloja/logs:/var/log/maloja"
    environment:
      - MALOJA_FORCE_PASSWORD=${MALOJA_FORCE_PASSWORD}
    restart: unless-stopped
networks:
  # Created outside this file (see README: 172.20.0.0/16)
  arr_network:
    external: true

84
compose/monitoring.yml Executable file
View File

@@ -0,0 +1,84 @@
# Monitoring & Maintenance Services
services:
  # ===== NETWORK MONITORING =====
  speedtest-tracker:
    image: lscr.io/linuxserver/speedtest-tracker:latest
    restart: unless-stopped
    container_name: speedtest-tracker
    profiles: ["monitoring", "all"]
    ports:
      - "8180:80"
      - "8143:443"
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
      # Laravel application key, supplied via .env
      - APP_KEY=${SPEEDTEST_APP_KEY}
      - DB_CONNECTION=mariadb
      - DB_HOST=speedtest-db
      - DB_PORT=3306
      - DB_DATABASE=${SPEEDTEST_DB_NAME}
      - DB_USERNAME=${SPEEDTEST_DB_USER}
      - DB_PASSWORD=${SPEEDTEST_DB_PASSWORD}
      - APP_DEBUG=false
      - SPEEDTEST_SCHEDULE=${SPEEDTEST_SCHEDULE}
      - SPEEDTEST_SERVERS=${SPEEDTEST_SERVERS}
      # Dashboard viewable without login
      - PUBLIC_DASHBOARD=true
    volumes:
      - ${DOCKER_CONFIG_ROOT}/speedtest-tracker/data:/config
    depends_on:
      # Wait for the database healthcheck before starting the app
      speedtest-db:
        condition: service_healthy
    networks:
      - database_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/api/healthcheck"]
      interval: 30s
      timeout: 10s
      retries: 3
  speedtest-db:
    image: mariadb:11
    container_name: speedtest-db
    profiles: ["monitoring", "all"]
    restart: unless-stopped
    environment:
      - MYSQL_DATABASE=${SPEEDTEST_DB_NAME}
      - MYSQL_USER=${SPEEDTEST_DB_USER}
      - MYSQL_PASSWORD=${SPEEDTEST_DB_PASSWORD}
      # Root password randomized at first init; the app only uses MYSQL_USER
      - MYSQL_RANDOM_ROOT_PASSWORD=true
    volumes:
      - ${DOCKER_CONFIG_ROOT}/speedtest-tracker/db:/var/lib/mysql
    networks:
      - database_network
    healthcheck:
      # healthcheck.sh is provided by the official mariadb image
      test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s
    security_opt:
      - no-new-privileges:true
  # ===== CONTAINER MAINTENANCE =====
  watchtower:
    image: containrrr/watchtower:latest
    container_name: watchtower
    profiles: ["monitoring", "all"]
    volumes:
      # SECURITY: read-write Docker socket access is required by Watchtower to
      # replace containers, but it is equivalent to root on the host.
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=${TZ}
      - WATCHTOWER_CLEANUP=${WATCHTOWER_CLEANUP}
      - WATCHTOWER_POLL_INTERVAL=${WATCHTOWER_POLL_INTERVAL}
      - WATCHTOWER_INCLUDE_RESTARTING=true
      - WATCHTOWER_INCLUDE_STOPPED=false
      - WATCHTOWER_REVIVE_STOPPED=false
      - WATCHTOWER_NO_STARTUP_MESSAGE=true
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
networks:
  database_network:
    external: true

271
compose/utility-stack.yml Executable file
View File

@@ -0,0 +1,271 @@
# Utility Services - Dashboard, file management, sync, etc.
services:
  # ===== DASHBOARD & MONITORING =====
  # Homepage dashboard; reads Docker state through dockerproxy below.
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    profiles: ["utilities", "all"]
    volumes:
      - ./Homepage/config/images:/app/public/images
      - ./Homepage/config/icons:/app/public/icons
      - ./Homepage/config:/app/config
    ports:
      - "7575:3000"
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
      - HOMEPAGE_ALLOWED_HOSTS=${HOMEPAGE_ALLOWED_HOSTS}
    restart: unless-stopped
    depends_on:
      - dockerproxy
  # Filtered, least-privilege proxy in front of the Docker socket; the
  # DOCKER_PROXY_* flags in .env control which API sections are exposed.
  dockerproxy:
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: dockerproxy
    profiles: ["utilities", "all"]
    environment:
      - CONTAINERS=${DOCKER_PROXY_CONTAINERS}
      - SERVICES=${DOCKER_PROXY_SERVICES}
      - TASKS=${DOCKER_PROXY_TASKS}
      - POST=${DOCKER_PROXY_POST}
    ports:
      # Bound to loopback only — not reachable from the LAN.
      - "127.0.0.1:2375:2375"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
  # ===== FILE MANAGEMENT =====
  filebrowser:
    image: hurlenko/filebrowser:latest
    container_name: filebrowser
    profiles: ["utilities", "all"]
    # Run as the host user so files created via the UI are not root-owned.
    user: "${PUID}:${PGID}"
    ports:
      - "6633:8080"
    volumes:
      - ./filebrowser/data:/data
      - ./filebrowser/config:/config
      # NOTE(review): this exposes the entire home directory and the media
      # root inside the container — confirm this scope is intended.
      - /home/jamie:/data/home
      - ${DATA_ROOT}:/data/media
    environment:
      - FB_BASEURL=/filebrowser
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== SYNCHRONIZATION =====
  syncthing:
    image: syncthing/syncthing:latest
    container_name: syncthing
    hostname: syncthing
    profiles: ["utilities", "all"]
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/syncthing:/var/syncthing
      # Same vault directory that obsidian-remote mounts, so notes sync.
      - ${DOCKER_CONFIG_ROOT}/obsidian/vaults:/var/syncthing/obsidian
    ports:
      - "8384:8384" # Web UI
      - "22000:22000/tcp" # TCP file transfers
      - "22000:22000/udp" # QUIC file transfers
      - "21027:21027/udp" # Receive local discovery broadcasts
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8384/rest/system/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== PRODUCTIVITY =====
  # Obsidian in the browser; vaults are shared with syncthing above.
  obsidian:
    image: ghcr.io/sytone/obsidian-remote:latest
    container_name: obsidian-remote
    profiles: ["utilities", "all"]
    restart: unless-stopped
    ports:
      - "8181:8080"
      - "8443:8443"
    volumes:
      - ${DOCKER_CONFIG_ROOT}/obsidian/vaults:/vaults
      - ${DOCKER_CONFIG_ROOT}/obsidian/config:/config
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
      # LinuxServer mod that adds git inside the container.
      - DOCKER_MODS=linuxserver/mods:universal-git
    depends_on:
      - syncthing
  stirling-pdf:
    container_name: stirling-pdf
    image: docker.stirlingpdf.com/stirlingtools/stirling-pdf:latest
    profiles: ["utilities", "all"]
    deploy:
      resources:
        limits:
          memory: 4G
    healthcheck:
      # Healthy only when the API reports UP and the landing page is not the
      # login screen (login is disabled via SECURITY_ENABLELOGIN below).
      test: ["CMD-SHELL", "curl -f http://localhost:8080/api/v1/info/status | grep -q 'UP' && curl -fL http://localhost:8080/ | grep -qv 'Please sign in'"]
      interval: 30s
      timeout: 10s
      retries: 5
    ports:
      - "8090:8080"
    volumes:
      - ./stirling/latest/data:/usr/share/tessdata:rw
      - ./stirling/latest/config:/configs:rw
      - ./stirling/latest/logs:/logs:rw
    environment:
      - DISABLE_ADDITIONAL_FEATURES=true
      - SECURITY_ENABLELOGIN=false
      - LANGS=en_GB,en_US,ar_AR,de_DE,fr_FR,es_ES,zh_CN,zh_TW,ca_CA,it_IT,sv_SE,pl_PL,ro_RO,ko_KR,pt_BR,ru_RU,el_GR,hi_IN,hu_HU,tr_TR,id_ID
      - SYSTEM_DEFAULTLOCALE=en-US
      - UI_APPNAME=Stirling-PDF
      - UI_HOMEDESCRIPTION=Stirling-PDF
      - UI_APPNAMENAVBAR=Stirling-PDF
      - SYSTEM_MAXFILESIZE=100
      - METRICS_ENABLED=true
      - SYSTEM_GOOGLEVISIBILITY=true
      - SHOW_SURVEY=true
    restart: unless-stopped
  # ===== REMOTE ACCESS =====
  # RustDesk self-hosted server pair: hbbr (relay) and hbbs (rendezvous),
  # both on host networking and sharing one data directory.
  hbbr:
    container_name: hbbr
    image: rustdesk/rustdesk-server:latest
    profiles: ["remote", "all"]
    command: hbbr
    environment:
      # NOTE(review): PUID/PGID are a LinuxServer.io convention — confirm the
      # rustdesk image actually honours them.
      - PUID=${PUID}
      - PGID=${PGID}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/rustdesk/data:/root
    network_mode: "host"
    restart: unless-stopped
  hbbs:
    container_name: hbbs
    image: rustdesk/rustdesk-server:latest
    profiles: ["remote", "all"]
    command: hbbs
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
    volumes:
      - ${DOCKER_CONFIG_ROOT}/rustdesk/data:/root
    network_mode: "host"
    depends_on:
      - hbbr
    restart: unless-stopped
  # ===== GENEALOGY =====
  # Gramps Web app; shares all volumes with the celery worker below so both
  # see the same tree, media and caches.
  grampsweb:
    container_name: gramps
    image: ghcr.io/gramps-project/grampsweb:latest
    profiles: ["genealogy", "all"]
    restart: unless-stopped
    ports:
      - "5511:5000"
    environment:
      - GRAMPSWEB_TREE=Gramps Web
      - GRAMPSWEB_CELERY_CONFIG__broker_url=redis://grampsweb_redis:6379/0
      - GRAMPSWEB_CELERY_CONFIG__result_backend=redis://grampsweb_redis:6379/0
      - GRAMPSWEB_RATELIMIT_STORAGE_URI=redis://grampsweb_redis:6379/1
    depends_on:
      - grampsweb_redis
    volumes:
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_users:/app/users
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_index:/app/indexdir
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_thumb_cache:/app/thumbnail_cache
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_cache:/app/cache
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_secret:/app/secret
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_db:/root/.gramps/grampsdb
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_media:/app/media
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_tmp:/tmp
  # Background task worker (thumbnails, exports); same image and volumes
  # as grampsweb, different command.
  grampsweb_celery:
    container_name: grampsweb_celery
    image: ghcr.io/gramps-project/grampsweb:latest
    profiles: ["genealogy", "all"]
    restart: unless-stopped
    environment:
      - GRAMPSWEB_TREE=Gramps Web
      - GRAMPSWEB_CELERY_CONFIG__broker_url=redis://grampsweb_redis:6379/0
      - GRAMPSWEB_CELERY_CONFIG__result_backend=redis://grampsweb_redis:6379/0
      - GRAMPSWEB_RATELIMIT_STORAGE_URI=redis://grampsweb_redis:6379/1
    depends_on:
      - grampsweb_redis
    volumes:
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_users:/app/users
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_index:/app/indexdir
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_thumb_cache:/app/thumbnail_cache
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_cache:/app/cache
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_secret:/app/secret
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_db:/root/.gramps/grampsdb
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_media:/app/media
      - ${DOCKER_CONFIG_ROOT}/gramps/gramps_tmp:/tmp
    command: celery -A gramps_webapi.celery worker --loglevel=INFO --concurrency=2
  # Redis broker/result backend for the two services above.
  grampsweb_redis:
    image: redis:7.2.4-alpine
    container_name: grampsweb_redis
    profiles: ["genealogy", "all"]
    restart: unless-stopped
    volumes:
      - grampsweb_redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
  # ===== API SERVICES =====
  # Locally built XML->JSON bridge for the RackNerd API (see
  # racknerd-converter/), consumed by Homepage widgets.
  racknerd-api-converter:
    build: ./racknerd-converter
    container_name: racknerd-api-converter
    profiles: ["utilities", "all"]
    ports:
      - "5000:5000"
    environment:
      # NOTE(review): the RACKNERD_* variables are not present in
      # .env.template — add them there or these expand to empty strings.
      - RACKNERD_API_KEY=${RACKNERD_API_KEY}
      - RACKNERD_API_HASH=${RACKNERD_API_HASH}
      - RACKNERD_VSERVER_ID=${RACKNERD_VSERVER_ID}
      - RACKNERD_BASE_URL=${RACKNERD_BASE_URL}
      - UPDATE_INTERVAL=${RACKNERD_UPDATE_INTERVAL}
      - HOST=0.0.0.0
      - PORT=5000
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
  # ===== MISC SERVICES =====
  # Pangolin tunnel client; endpoint and credentials come from .env
  # (PANGOLIN_URL, NEWT_ID, NEWT_SECRET).
  newt:
    image: fosrl/newt:latest
    container_name: newt
    profiles: ["utilities", "all"]
    restart: unless-stopped
    environment:
      - PANGOLIN_ENDPOINT=${PANGOLIN_URL}
      - NEWT_ID=${NEWT_ID}
      - NEWT_SECRET=${NEWT_SECRET}
# Named volumes owned by this stack.
volumes:
  grampsweb_redis_data:
    driver: local

31
docker-compose.yml Executable file
View File

@@ -0,0 +1,31 @@
# Main orchestration file - includes all service stacks
# Use profiles to control which services run in different environments
include:
  - compose/media-stack.yml
  # FIX: this commit creates compose/utility-stack.yml, but the include
  # previously pointed at compose/utilities.yml; a missing include file makes
  # every `docker compose` invocation fail.
  - compose/utility-stack.yml
  - compose/document-management.yml
  - compose/monitoring.yml
# Global networks
networks:
  arr_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
  database_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.21.0.0/16
# Global volumes
volumes:
  aidata:
    driver: local
  dbdata:
    driver: local
  redisdata:
    driver: local

Binary file not shown.

179
open-webui/Dockerfile Executable file
View File

@@ -0,0 +1,179 @@
# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# CUDA wheel index used when USE_CUDA=true. Default is cu128; cu117 (CUDA 11)
# and cu121 (CUDA 12) have also been tested.
ARG USE_CUDA_VER=cu128
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
# for better performance and multilingual support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: If you change the embedding model (sentence-transformers/all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""
# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
ARG BUILD_HASH=dev-build
# Override at your own risk - non-root configurations are untested
ARG UID=0
ARG GID=0

######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
ARG BUILD_HASH
WORKDIR /app
# to store git revision in build
RUN apk add --no-cache git
COPY package.json package-lock.json ./
RUN npm ci
COPY . .
ENV APP_BUILD_HASH=${BUILD_HASH}
RUN npm run build

######## WebUI backend ########
FROM python:3.11-slim-bookworm AS base

# Use args
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
ARG USE_EMBEDDING_MODEL
ARG USE_RERANKING_MODEL
ARG UID
ARG GID

## Basis ##
ENV ENV=prod \
    PORT=8080 \
    # pass build args to the build
    USE_OLLAMA_DOCKER=${USE_OLLAMA} \
    USE_CUDA_DOCKER=${USE_CUDA} \
    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}

## Basis URL Config ##
ENV OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL=""

## API Key and Security Config ##
ENV OPENAI_API_KEY="" \
    WEBUI_SECRET_KEY="" \
    SCARF_NO_ANALYTICS=true \
    DO_NOT_TRACK=true \
    ANONYMIZED_TELEMETRY=false

#### Other models #########################################################
## whisper TTS model settings ##
ENV WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"

## RAG Embedding model settings ##
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"

## Tiktoken model settings ##
ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"

## Hugging Face download cache ##
ENV HF_HOME="/app/backend/data/cache/embedding/models"

## Torch Extensions ##
# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"

#### Other models ##########################################################

WORKDIR /app/backend

ENV HOME=/root
# Create user and group if not root
RUN if [ $UID -ne 0 ]; then \
    if [ $GID -ne 0 ]; then \
    addgroup --gid $GID app; \
    fi; \
    adduser --uid $UID --gid $GID --home $HOME --disabled-password --no-create-home app; \
    fi

# Write a fixed telemetry id up front — presumably so Chroma does not
# generate a random one at runtime (TODO confirm).
RUN mkdir -p $HOME/.cache/chroma
RUN echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id

# Make sure the user has access to the app and root directory
RUN chown -R $UID:$GID /app $HOME

RUN if [ "$USE_OLLAMA" = "true" ]; then \
    apt-get update && \
    # Install pandoc and netcat
    apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # install helper tools
    apt-get install -y --no-install-recommends curl jq && \
    # install ollama
    curl -fsSL https://ollama.com/install.sh | sh && \
    # cleanup
    rm -rf /var/lib/apt/lists/*; \
    else \
    apt-get update && \
    # Install pandoc, netcat and gcc
    apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # cleanup
    rm -rf /var/lib/apt/lists/*; \
    fi

# install python dependencies
COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt

# NOTE(review): in both branches below, the WhisperModel and tiktoken warm-up
# commands are chained with ';' instead of '&&', so their failure does not
# fail the build — the models would then simply be downloaded again on first
# container start. Confirm whether that is intentional before changing it.
RUN pip3 install --no-cache-dir uv && \
    if [ "$USE_CUDA" = "true" ]; then \
    # If you use CUDA the whisper and embedding model will be downloaded on first use
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    else \
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    fi; \
    chown -R $UID:$GID /app/backend/data/

# copy embedding weight from build
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx

# copy built frontend files
COPY --chown=$UID:$GID --from=build /app/build /app/build
COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json

# copy backend files
COPY --chown=$UID:$GID ./backend .

EXPOSE 8080

HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1

USER $UID:$GID

ARG BUILD_HASH
ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
ENV DOCKER=true

CMD [ "bash", "start.sh"]

34
racknerd-converter/Dockerfile Executable file
View File

@@ -0,0 +1,34 @@
# Use Python 3.11 slim image
FROM python:3.11-slim

# FIX: unbuffered stdout so the Flask/logging output is visible immediately
# via `docker logs` (the README tells users to monitor the service that way);
# also skip writing .pyc files in the container.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# Set working directory
WORKDIR /app

# Install system dependencies (curl is required by the HEALTHCHECK below).
# --no-install-recommends keeps the layer small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements file first so dependency installation is layer-cached.
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application
COPY kvmapiconv.py .

# Create a non-root user
RUN useradd --create-home --shell /bin/bash appuser && \
    chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 5000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:5000/health || exit 1

# Run the application
CMD ["python", "kvmapiconv.py"]

152
racknerd-converter/README.md Executable file
View File

@@ -0,0 +1,152 @@
# RackNerd KVM API Converter
A Python Flask service that converts RackNerd KVM XML API responses to JSON format for Homepage widgets.
## Features
- Converts RackNerd XML API responses to clean JSON format
- Caches data to reduce API calls (configurable interval, default 30 minutes)
- RESTful API endpoints for Homepage integration
- Docker containerized for easy deployment
- Health checks and error handling
- Automatic background data updates
## API Endpoints
- `GET /api/kvm` - Get cached KVM data in JSON format
- `GET /api/kvm/raw` - Get fresh data (bypass cache)
- `GET /api/kvm/status` - Service status and metadata
- `GET /health` - Health check endpoint
## Quick Start
### Using Docker Compose (Recommended)
1. Clone/download the files
2. Update the environment variables in `docker-compose.yml` with your RackNerd API credentials
3. Run:
```bash
docker-compose up -d
```
### Using Docker
```bash
# Build the image
docker build -t racknerd-api-converter .
# Run the container
docker run -d \
-p 5000:5000 \
-e RACKNERD_API_KEY=your_api_key \
-e RACKNERD_API_HASH=your_api_hash \
-e RACKNERD_VSERVER_ID=your_vserver_id \
-e UPDATE_INTERVAL=1800 \
--name racknerd-converter \
racknerd-api-converter
```
### Manual Python Setup
```bash
pip install -r requirements.txt
export RACKNERD_API_KEY=your_api_key
export RACKNERD_API_HASH=your_api_hash
export RACKNERD_VSERVER_ID=your_vserver_id
python kvmapiconv.py
```
## Configuration
Configure via environment variables:
| Variable | Description | Default |
|----------|-------------|---------|
| `RACKNERD_API_KEY` | Your RackNerd API key | *(set your own — never commit real credentials)* |
| `RACKNERD_API_HASH` | Your RackNerd API hash | *(set your own — never commit real credentials)* |
| `RACKNERD_VSERVER_ID` | Your VServer ID | *(set your own)* |
| `RACKNERD_BASE_URL` | RackNerd API base URL | https://nerdvm.racknerd.com/api/client/command.php |
| `UPDATE_INTERVAL` | Cache update interval (seconds) | 1800 (30 minutes) |
| `HOST` | Server host | 0.0.0.0 |
| `PORT` | Server port | 5000 |
## Homepage Widget Configuration
Add this to your Homepage `services.yaml`:
```yaml
- KVM Server:
icon: server
href: http://your-server:5000
ping: http://your-server:5000
widget:
type: customapi
url: http://your-server:5000/api/kvm
refreshInterval: 30000
mappings:
- field: hostname
label: Hostname
- field: status
label: Status
- field: uptime
label: Uptime
- field: cpu_usage
label: CPU Usage
suffix: "%"
- field: memory_usage
label: Memory
suffix: "%"
```
## API Response Format
The service converts XML responses to JSON. Example response:
```json
{
"vserver": {
"hostname": "your-server",
"status": "online",
"uptime": "15 days",
"cpu_usage": 25.5,
"memory": {
"total": "4096MB",
"used": "2048MB",
"free": "2048MB"
},
"bandwidth": {
"used": "150GB",
"total": "1000GB"
}
},
"_metadata": {
"last_updated": "2024-01-15T10:30:00Z",
"source": "racknerd_api",
"vserver_id": "476515"
}
}
```
## Monitoring
- Check service health: `GET /health`
- Monitor logs: `docker logs racknerd-api-converter`
- Service status: `GET /api/kvm/status`
## Security Notes
- Keep your API credentials secure
- Consider using Docker secrets for production
- The service runs as a non-root user
- Network access is limited to necessary ports
## Troubleshooting
1. **No data returned**: Check API credentials and network connectivity
2. **XML parsing errors**: Verify API response format hasn't changed
3. **Container won't start**: Check environment variables and port conflicts
4. **Homepage not updating**: Verify URL and check service logs
## Contributing
Feel free to submit issues and enhancement requests!

315
racknerd-converter/kvmapiconv.py Executable file
View File

@@ -0,0 +1,315 @@
#!/usr/bin/env python3
import requests
import xml.etree.ElementTree as ET
import json
import time
import threading
from flask import Flask, jsonify, request
from datetime import datetime
import logging
import os
from typing import Dict, Any, Optional
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
app = Flask(__name__)
class RackNerdAPIConverter:
    """Convert RackNerd's XML "vserver info" API responses into JSON.

    Fetches data with ``requests``, converts the XML payload to plain Python
    structures, and keeps a cached copy that a background thread refreshes
    every ``update_interval`` seconds.  All configuration comes from
    environment variables (see the README).
    """

    def __init__(self):
        # SECURITY FIX: the previous defaults hard-coded a real API key, API
        # hash and vserver id into the source.  Credentials have no safe
        # default, so fall back to empty strings; a misconfigured deployment
        # now fails visibly at the API call instead of silently using leaked
        # credentials.
        self.api_key = os.getenv('RACKNERD_API_KEY', '')
        self.api_hash = os.getenv('RACKNERD_API_HASH', '')
        self.vserver_id = os.getenv('RACKNERD_VSERVER_ID', '')
        self.base_url = os.getenv('RACKNERD_BASE_URL', 'https://nerdvm.racknerd.com/api/client/command.php')
        self.update_interval = int(os.getenv('UPDATE_INTERVAL', '1800'))  # seconds; default 30 minutes
        self.cached_data = {}   # last fetched payload (may carry an 'error' key)
        self.last_update = None  # datetime of the last cache refresh, None until first fetch
        self.lock = threading.Lock()  # guards cached_data / last_update

    def fetch_kvm_data(self) -> Optional[Dict[str, Any]]:
        """Fetch data from the RackNerd API and convert XML to JSON.

        Always returns a dict: on failure it carries an 'error' key so the
        HTTP layer can surface the problem to clients.
        """
        try:
            # Query parameters for the 'info' action (bw=true adds bandwidth).
            params = {
                'key': self.api_key,
                'hash': self.api_hash,
                'action': 'info',
                'vserverid': self.vserver_id,
                'bw': 'true'
            }
            logger.info(f"Fetching data from RackNerd API for vserver {self.vserver_id}")
            response = requests.get(self.base_url, params=params, timeout=30)
            response.raise_for_status()
            xml_text = response.text.strip()
            # Credential/endpoint errors come back as an HTML page, not XML.
            if xml_text.lower().startswith('<!doctype') or xml_text.lower().startswith('<html'):
                logger.error("Received HTML response instead of XML - check API credentials")
                return {
                    'error': 'Invalid API Response',
                    'message': 'Received HTML instead of XML. Check your API credentials.',
                    'raw_response': xml_text[:500]
                }
            # RackNerd returns XML fragments without a root element; wrap them
            # so ElementTree can parse the document.
            if not xml_text.startswith('<?xml') and not xml_text.startswith('<root>'):
                xml_text = f"<root>{xml_text}</root>"
                logger.info("Wrapped XML fragments in root element")
            root = ET.fromstring(xml_text)
            json_data = self.xml_to_dict(root)
            # 'bw' is a comma-separated string: total,used[,free,percent].
            if 'bw' in json_data and isinstance(json_data['bw'], str):
                bw_parts = json_data['bw'].split(',')
                if len(bw_parts) >= 4:
                    json_data['bandwidth'] = {
                        'total_bytes': int(bw_parts[0]) if bw_parts[0].isdigit() else bw_parts[0],
                        'used_bytes': int(bw_parts[1]) if bw_parts[1].isdigit() else bw_parts[1],
                        'total_formatted': self.format_bytes(int(bw_parts[0])) if bw_parts[0].isdigit() else bw_parts[0],
                        'used_formatted': self.format_bytes(int(bw_parts[1])) if bw_parts[1].isdigit() else bw_parts[1],
                        'remaining_bytes': int(bw_parts[0]) - int(bw_parts[1]) if bw_parts[0].isdigit() and bw_parts[1].isdigit() else 0,
                        'usage_percent': round((int(bw_parts[1]) / int(bw_parts[0])) * 100, 2) if bw_parts[0].isdigit() and bw_parts[1].isdigit() and int(bw_parts[0]) > 0 else 0
                    }
                    if json_data['bandwidth']['remaining_bytes'] > 0:
                        json_data['bandwidth']['remaining_formatted'] = self.format_bytes(json_data['bandwidth']['remaining_bytes'])
            json_data['_metadata'] = {
                'last_updated': datetime.utcnow().isoformat() + 'Z',
                'source': 'racknerd_api',
                'vserver_id': self.vserver_id,
                'raw_response_length': len(response.text)
            }
            logger.info("Successfully converted XML to JSON")
            return json_data
        except requests.exceptions.RequestException as e:
            logger.error(f"API request failed: {e}")
            return {
                'error': 'API Request Failed',
                'message': str(e),
                'timestamp': datetime.utcnow().isoformat() + 'Z'
            }
        except ET.ParseError as e:
            # response is always bound here: parsing happens after the request.
            logger.error(f"XML parsing failed: {e}")
            logger.error(f"Raw response that failed to parse: {response.text}")
            return {
                'error': 'XML Parse Error',
                'message': str(e),
                'raw_response': response.text[:1000],
                'timestamp': datetime.utcnow().isoformat() + 'Z'
            }
        except Exception as e:
            logger.error(f"Unexpected error: {e}")
            return {
                'error': 'Unexpected Error',
                'message': str(e),
                'timestamp': datetime.utcnow().isoformat() + 'Z'
            }

    def xml_to_dict(self, element) -> Dict[str, Any]:
        """Recursively convert an XML element to a dict.

        Leaf text is coerced to bool/int/float where possible; attributes go
        under '_attributes'; repeated child tags collapse into lists.  A
        text-only element is returned as its coerced value, not a dict.
        """
        result = {}
        if element.text and element.text.strip():
            text = element.text.strip()
            if text.lower() in ['true', 'false']:
                result['_text'] = text.lower() == 'true'
            elif text.isdigit():
                result['_text'] = int(text)
            elif self.is_float(text):
                result['_text'] = float(text)
            else:
                result['_text'] = text
        if element.attrib:
            result['_attributes'] = element.attrib
        children = {}
        for child in element:
            child_data = self.xml_to_dict(child)
            if child.tag in children:
                # Multiple children with the same tag become a list.
                if not isinstance(children[child.tag], list):
                    children[child.tag] = [children[child.tag]]
                children[child.tag].append(child_data)
            else:
                children[child.tag] = child_data
        if children:
            result.update(children)
        # If only text content, return it directly.
        if len(result) == 1 and '_text' in result:
            return result['_text']
        return result

    def format_bytes(self, bytes_value: int) -> str:
        """Convert a byte count to a human-readable string (e.g. '1.5 KB')."""
        for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
            if bytes_value < 1024.0:
                return f"{bytes_value:.1f} {unit}"
            bytes_value /= 1024.0
        return f"{bytes_value:.1f} PB"

    def is_float(self, value: str) -> bool:
        """Return True if the string parses as a float."""
        try:
            float(value)
            return True
        except ValueError:
            return False

    def update_cache(self):
        """Refresh the cache.  Error payloads are cached too, deliberately,
        so clients see the failure instead of stale data."""
        with self.lock:
            data = self.fetch_kvm_data()
            if data:
                self.cached_data = data
                self.last_update = datetime.utcnow()
                logger.info("Cache updated successfully")
            else:
                logger.error("Failed to update cache")

    def get_cached_data(self) -> Dict[str, Any]:
        """Return a copy of the cached payload (thread-safe)."""
        with self.lock:
            if not self.cached_data:
                return {
                    'error': 'No data available',
                    'message': 'Initial data fetch in progress'
                }
            return self.cached_data.copy()

    def start_background_updates(self):
        """Start a daemon thread that refreshes the cache periodically."""
        def update_loop():
            while True:
                self.update_cache()
                time.sleep(self.update_interval)
        thread = threading.Thread(target=update_loop, daemon=True)
        thread.start()
        logger.info(f"Background updates started (interval: {self.update_interval} seconds)")
# Initialize converter (module-level singleton used by all routes).
converter = RackNerdAPIConverter()


@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint used by the Docker HEALTHCHECK."""
    return jsonify({
        'status': 'healthy',
        'timestamp': datetime.utcnow().isoformat() + 'Z',
        'last_update': converter.last_update.isoformat() + 'Z' if converter.last_update else None
    })


@app.route('/api/kvm', methods=['GET'])
def get_kvm_data():
    """Main endpoint to get KVM data in JSON format (served from cache)."""
    return jsonify(converter.get_cached_data())


@app.route('/api/kvm/raw', methods=['GET'])
def get_raw_kvm_data():
    """Endpoint to get fresh data (bypass cache)."""
    data = converter.fetch_kvm_data()
    if data:
        return jsonify(data)
    else:
        return jsonify({
            'error': 'Failed to fetch data',
            'message': 'Unable to retrieve data from RackNerd API'
        }), 500


@app.route('/api/kvm/debug', methods=['GET'])
def debug_api():
    """Debug endpoint to see the raw API response."""
    try:
        params = {
            'key': converter.api_key,
            'hash': converter.api_hash,
            'action': 'info',
            'vserverid': converter.vserver_id,
            'bw': 'true'
        }
        response = requests.get(converter.base_url, params=params, timeout=30)
        return jsonify({
            'status_code': response.status_code,
            'headers': dict(response.headers),
            'raw_content': response.text,
            'content_length': len(response.text),
            # SECURITY FIX: previously this returned response.url, which
            # embeds the API key and hash as query parameters — leaking the
            # credentials to anyone who can reach this endpoint.  Only the
            # credential-free base URL is exposed now.
            'url': converter.base_url
        })
    except Exception as e:
        return jsonify({
            'error': 'Debug request failed',
            'message': str(e)
        }), 500


@app.route('/api/kvm/status', methods=['GET'])
def get_status():
    """Get service status and metadata."""
    with converter.lock:
        return jsonify({
            'service': 'racknerd-api-converter',
            'status': 'running',
            'vserver_id': converter.vserver_id,
            'update_interval': converter.update_interval,
            'last_update': converter.last_update.isoformat() + 'Z' if converter.last_update else None,
            'has_cached_data': bool(converter.cached_data),
            'timestamp': datetime.utcnow().isoformat() + 'Z'
        })


@app.errorhandler(404)
def not_found(error):
    """Uniform JSON body for unknown routes."""
    return jsonify({
        'error': 'Not found',
        'message': 'The requested endpoint does not exist'
    }), 404


@app.errorhandler(500)
def internal_error(error):
    """Uniform JSON body for unhandled server errors."""
    return jsonify({
        'error': 'Internal server error',
        'message': 'An unexpected error occurred'
    }), 500


if __name__ == '__main__':
    # Prime the cache once so the first request is not empty, then start the
    # periodic background refresh and serve.
    logger.info("Starting RackNerd API Converter...")
    converter.update_cache()
    converter.start_background_updates()
    port = int(os.getenv('PORT', '5000'))
    host = os.getenv('HOST', '0.0.0.0')
    logger.info(f"Starting server on {host}:{port}")
    app.run(host=host, port=port, debug=False)

View File

@@ -0,0 +1,3 @@
Flask==2.3.3
requests==2.31.0
Werkzeug==2.3.7

1
secrets/authelia_jwt_secret Executable file
View File

@@ -0,0 +1 @@
i5Dssz20RgmarmAl9Mo1ugp6HxxStmcJtWWkrbuS06ap2pOlmROCfScIEsHuyBXA

View File

@@ -0,0 +1 @@
B6vqqwdsRmdYOhUzo5RUgioAYaKdwZpVSmi5PCFKxYIko1LtazXjaERnKCzG3zl8

View File

@@ -0,0 +1 @@
nMtLQfZ4v8SH63aL54mM8EOtg2eLrobnElYtjsyKjBRzBa8CyDc4EwYfJZpgUmal

205
setup.sh Executable file
View File

@@ -0,0 +1,205 @@
#!/bin/bash
# ===========================================
# Docker Media Stack Setup Script
# ===========================================
# Strict mode: exit on error (-e), error on unset variables (-u),
# and fail a pipeline if any stage fails (pipefail).
set -euo pipefail
echo "🚀 Setting up Docker Media & Utility Stack..."
# Colors for output (ANSI escape sequences; printed via `echo -e` below)
RED='\033[0;31m'     # errors
GREEN='\033[0;32m'   # success / info
YELLOW='\033[1;33m'  # warnings
BLUE='\033[0;34m'    # section headings
NC='\033[0m' # No Color
# Helpers that print a colored severity tag followed by the message.
# %b expands the backslash escapes stored in the color variables,
# matching the behaviour of `echo -e`.
print_status() {
  printf '%b[INFO]%b %b\n' "${GREEN}" "${NC}" "$1"
}

print_warning() {
  printf '%b[WARNING]%b %b\n' "${YELLOW}" "${NC}" "$1"
}

print_error() {
  printf '%b[ERROR]%b %b\n' "${RED}" "${NC}" "$1"
}
# Check if Docker and the Docker Compose plugin are installed.
# Exits with status 1 if either is missing.
check_dependencies() {
    print_status "Checking dependencies..."
    if ! command -v docker &> /dev/null; then
        print_error "Docker is not installed. Please install Docker first."
        exit 1
    fi
    # BUGFIX: `command -v docker compose` looks up "docker" AND a command
    # named "compose"; since "compose" is a CLI plugin, not a PATH binary,
    # that check always failed. Probe the subcommand itself instead.
    if ! docker compose version &> /dev/null; then
        print_error "Docker Compose is not installed. Please install Docker Compose first."
        exit 1
    fi
    print_status "Dependencies check passed ✓"
}
# Create the full directory layout expected by the compose stack.
# /data (media + torrents) needs root; config trees are user-owned.
create_directories() {
    print_status "Creating directory structure..."
    # Main data directories (root-owned mount point; ownership is fixed
    # later by set_permissions)
    sudo mkdir -p /data/{movies,tv,music,torrents}
    # Docker config directories, kept outside the repo in ../docker-local
    mkdir -p ../docker-local/{Arrs/{Sonarr,Radarr,Lidarr,Bazarr,Prowlarr,Jellyfin,Jellyseerr}/config,speedtest-tracker/{data,db},rustdesk/data,gramps,syncthing,obsidian/{vaults,config}}
    # Application specific directories alongside the compose file
    mkdir -p {qBittorrent,Homepage,filebrowser,maloja,scrobble,stirling/latest,paperless,gramps,racknerd-converter}/config
    mkdir -p {Homepage/config/{images,icons},filebrowser/data,maloja/{data,logs},stirling/latest/{data,config,logs},paperless/{data,media,export,consume},racknerd-converter/{data,logs}}
    print_status "Directories created ✓"
}
# Hand ownership of the data and config trees to the invoking user so
# containers running with PUID/PGID can write to the bind mounts.
set_permissions() {
    print_status "Setting permissions..."
    # Resolve the invoking user's IDs once.
    CURRENT_UID=$(id -u)
    CURRENT_GID=$(id -g)
    # /data was created via sudo, so root is needed to hand it back.
    # Expansions are quoted (ShellCheck SC2086) — harmless today, but
    # protects against word-splitting if the values ever change shape.
    if [ -d "/data" ]; then
        sudo chown -R "${CURRENT_UID}:${CURRENT_GID}" /data
    fi
    # Config tree outside the repo.
    if [ -d "../docker-local" ]; then
        chown -R "${CURRENT_UID}:${CURRENT_GID}" ../docker-local
    fi
    # The compose directory itself.
    chown -R "${CURRENT_UID}:${CURRENT_GID}" .
    print_status "Permissions set ✓"
}
# Verify .env exists and defines every mandatory key.
# Exits with status 1 on the first missing piece of configuration.
validate_environment() {
    print_status "Validating environment configuration..."
    if [ ! -f ".env" ]; then
        print_error ".env file not found. Please ensure it exists."
        exit 1
    fi
    # Each key must appear as an assignment (KEY=...) at line start.
    local var
    for var in TZ PUID PGID DOMAIN DATA_ROOT; do
        if ! grep -q "^${var}=" .env; then
            print_error "Required environment variable $var not found in .env"
            exit 1
        fi
    done
    print_status "Environment validation passed ✓"
}
# Pull the latest image for every service in docker-compose.yml.
# Relies on `docker compose` resolving the compose file in the cwd.
pull_images() {
    print_status "Pulling latest Docker images..."
    docker compose pull
    print_status "Images pulled ✓"
}
# Create the shared Docker networks, then bring the whole stack up.
start_services() {
    print_status "Starting services..."
    # Pre-create networks with fixed subnets; suppress and ignore the
    # "already exists" error so re-running the script stays idempotent.
    local spec
    for spec in "arr_network:172.20.0.0/16" "database_network:172.21.0.0/16"; do
        docker network create "${spec%%:*}" --subnet="${spec#*:}" 2>/dev/null || true
    done
    # Launch every service detached.
    docker compose up -d
    print_status "Services started ✓"
}
# Print a grouped summary of every service URL on localhost, plus a few
# day-to-day docker compose commands. Output only — no side effects.
# NOTE(review): ports are hard-coded here; they must be kept in sync with
# the port mappings in docker-compose.yml — confirm when ports change.
show_services() {
    echo
    print_status "🎉 Setup complete! Your services are available at:"
    echo
    echo -e "${BLUE}📊 Dashboard & Management:${NC}"
    echo " • Homepage (Dashboard): http://localhost:7575"
    echo " • FileBrowser: http://localhost:6633"
    echo
    echo -e "${BLUE}🎬 Media Services:${NC}"
    echo " • Jellyfin (Media Server): http://localhost:8096"
    echo " • Jellyseerr (Requests): http://localhost:5055"
    echo " • Sonarr (TV Shows): http://localhost:8989"
    echo " • Radarr (Movies): http://localhost:7878"
    echo " • Lidarr (Music): http://localhost:8686"
    echo " • Bazarr (Subtitles): http://localhost:6767"
    echo " • Prowlarr (Indexers): http://localhost:9696"
    echo " • qBittorrent: http://localhost:7070"
    echo
    echo -e "${BLUE}📄 Document Management:${NC}"
    echo " • Paperless NGX: http://localhost:8100"
    echo " • Paperless AI: http://localhost:3040"
    echo " • Stirling PDF: http://localhost:8090"
    echo
    echo -e "${BLUE}🛠️ Utilities:${NC}"
    echo " • Speedtest Tracker: http://localhost:8180"
    echo " • Syncthing: http://localhost:8384"
    echo " • Obsidian Remote: http://localhost:8181"
    echo " • Gramps (Genealogy): http://localhost:5511"
    echo
    echo -e "${BLUE}🎵 Music Services:${NC}"
    echo " • Multi-Scrobbler: http://localhost:9078"
    echo " • Maloja: http://localhost:42010"
    echo
    echo -e "${BLUE}🖥️ Server Monitoring:${NC}"
    echo " • RackNerd API: http://localhost:5000"
    echo
    print_warning "Some services may take a few minutes to fully initialize."
    echo
    echo -e "${GREEN}💡 Useful commands:${NC}"
    echo " • View status: docker compose ps"
    echo " • View logs: docker compose logs [service]"
    echo " • Stop all: docker compose down"
    echo " • Update services: docker compose pull && docker compose up -d"
}
# Main execution: print the banner, then run every setup phase in
# dependency order (checks before filesystem work before Docker).
main() {
    echo -e "${BLUE}"
    echo "╔══════════════════════════════════════════════════╗"
    echo "║ Docker Media Stack Setup ║"
    echo "║ ║"
    echo "║ This script will set up your complete ║"
    echo "║ media and utility stack with Docker Compose ║"
    echo "╚══════════════════════════════════════════════════╝"
    echo -e "${NC}"
    # Phases run strictly in this order; each one exits on fatal errors.
    local phase
    for phase in check_dependencies validate_environment create_directories \
                 set_permissions pull_images start_services show_services; do
        "$phase"
    done
    echo
    print_status "🎯 Setup completed successfully!"
    print_warning "Remember to configure your services through their web interfaces."
    echo
}

# Run main function
main "$@"