Deleted all the services!

This commit is contained in:
Sönke Domröse
2026-02-18 11:18:18 +01:00
parent 8902785cd3
commit 6ca32f3d2c
38 changed files with 130 additions and 608 deletions

Submodule McpDiceRoller deleted from 4fa59ed153

View File

@@ -1,5 +0,0 @@
sudo apt install wget lsscsi
lsscsi -g
wget https://raw.githubusercontent.com/automatic-ripping-machine/automatic-ripping-machine/main/scripts/docker-setup.sh
sudo chmod +x docker-setup.sh

View File

@@ -1,102 +0,0 @@
#!/usr/bin/env bash
# Installs Docker and sets up the Automatic Ripping Machine (ARM) container.
# Must be run as root: it installs packages, creates users, and writes to /mnt.
# -u added so typos in variable names fail fast instead of expanding empty.
set -euo pipefail

# ANSI colors for highlighted status output; never reassigned.
readonly RED='\033[1;31m'
readonly NC='\033[0m' # No Color

# DockerHub defaults; overridable via the -f / -t command-line options below.
FORK=automaticrippingmachine
TAG=latest
# Print command-line help to stdout.
# Reads globals FORK and TAG to show the current defaults.
usage() {
  printf '\nUsage: docker_setup.sh [OPTIONS]\n'
  printf ' -f <fork>\tSpecify the fork to pull from on DockerHub. \n\t\tDefault is "%s"\n' "$FORK"
  printf ' -t <tag>\tSpecify the tag to pull from on DockerHub. \n\t\tDefault is "%s"\n' "$TAG"
}
# Parse command-line flags: -f <fork> and -t <tag> override the defaults;
# anything else prints usage and exits with status 2.
while getopts 'f:t:' opt; do
  case "$opt" in
    f)
      FORK="$OPTARG"
      ;;
    t)
      TAG="$OPTARG"
      ;;
    ?)
      usage
      exit 2
      ;;
  esac
done

# Fully-qualified DockerHub image reference used by pull/start steps.
IMAGE="$FORK/automatic-ripping-machine:$TAG"
# Refresh the package index, upgrade installed packages, and install the
# prerequisites this script needs. Requires root (runs apt directly).
function install_reqs() {
  local -a required_packages=(curl lsscsi)
  apt update -y && apt upgrade -y
  apt install -y "${required_packages[@]}"
}
# Create the 'arm' user and group idempotently, then grant the device group
# memberships (cdrom, video) needed for disc ripping.
# NOTE: 'passwd arm' prompts interactively for the new user's password.
function add_arm_user() {
  echo -e "${RED}Adding arm user${NC}"
  # Create the arm group unless it is already present.
  if getent group arm >/dev/null; then
    echo -e "${RED}arm group already exists, skipping...${NC}"
  else
    groupadd arm
  fi
  # Create the arm user unless it already exists.
  if id arm >/dev/null 2>&1; then
    echo -e "${RED}arm user already exists, skipping...${NC}"
  else
    useradd -m arm -g arm
    passwd arm
  fi
  usermod -aG cdrom,video arm
}
# Install Docker if it is not already present, then add the arm user to the
# docker group so it can manage containers without root.
function launch_setup() {
  if [ -e /usr/bin/docker ]; then
    echo -e "${RED}Docker installation detected, skipping...${NC}"
  else
    echo -e "${RED}Installing Docker${NC}"
    # Docker's convenience script auto-detects the OS and installs accordingly.
    curl -sSL https://get.docker.com | bash
  fi
  # Group membership is needed in both branches.
  echo -e "${RED}Adding user arm to docker user group${NC}"
  usermod -aG docker arm
}
# Pull the ARM container image from DockerHub, running as the arm user.
# Reads global IMAGE ("<fork>/automatic-ripping-machine:<tag>").
function pull_image() {
  local image_ref="$IMAGE"
  # %b interprets the backslash escapes in the color codes, like echo -e.
  printf '%b\n' "${RED}Pulling image from ${image_ref}${NC}"
  sudo -u arm docker pull "$image_ref"
}
# Create a mount point under /mnt for every optical drive (/dev/sr0, /dev/sr1,
# ...) and hand ownership to the arm user so the container can mount discs.
function setup_mountpoints() {
  echo -e "${RED}Creating mount points${NC}"
  local dev
  for dev in /dev/sr?; do
    # If no optical drive exists the glob stays literal; skip it instead of
    # creating a bogus '/mnt/dev/sr?' directory and then aborting under
    # set -e when the original's 'chown /mnt/dev/sr*' found nothing to match.
    [ -e "$dev" ] || continue
    mkdir -p "/mnt$dev"
    chown arm:arm "/mnt$dev"
  done
}
# Download the container start-script template into the arm user's home
# directory, backing up any existing copy, and bake the chosen image name
# into it so the user can launch the container with one command.
function save_start_command() {
  local url="https://raw.githubusercontent.com/automatic-ripping-machine/automatic-ripping-machine/main/scripts/docker/start_arm_container.sh"
  # Fail loudly if the arm home directory is missing rather than operating
  # on whatever the current directory happens to be.
  cd ~arm || { echo -e "${RED}Cannot cd into the arm home directory${NC}" >&2; exit 1; }
  if [ -e start_arm_container.sh ]; then
    echo -e "'start_arm_container.sh' already exists. Backing up..."
    # The script already runs as root throughout (apt/useradd above), so the
    # original's bare 'sudo mv' was redundant; plain mv suffices.
    mv ./start_arm_container.sh ./start_arm_container.sh.bak
  fi
  sudo -u arm curl -fsSL "$url" -o start_arm_container.sh
  chmod +x start_arm_container.sh
  # NOTE: assumes $IMAGE contains no '|' (used as the sed delimiter).
  sed -i "s|IMAGE_NAME|${IMAGE}|" start_arm_container.sh
}
# --- main ----------------------------------------------------------------
# Run the installation steps in dependency order: packages first, then the
# arm user, Docker, the image, mount points, and finally the start script.
install_reqs
add_arm_user
launch_setup
pull_image
setup_mountpoints
save_start_command
printf '%b\n' "${RED}Installation complete. A template command to run the ARM container is located in: $(echo ~arm) ${NC}"

View File

View File

@@ -7,8 +7,6 @@ auth.home.domroese.eu {
}
beszel.domr.ovh,
beszel.home.domroese.eu {
tls soenke@domroese.eu
@@ -40,18 +38,6 @@ chartbrew.home.domroese.eu:4019 {
reverse_proxy 192.168.1.65:4019
}
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}
dashy.domr.ovh, #donetick
dashy.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -83,18 +69,13 @@ git.home.domroese.eu {
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
haus.domr.ovh,
haus.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
}
homebox.domr.ovh,
homebox.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -108,10 +89,10 @@ homepage.home.domroese.eu:443 {
reverse_proxy 192.168.1.65:3891
}
huly.domr.ovh,
huly.home.domroese.eu {
immich.domr.ovh,
immich.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
reverse_proxy 192.168.1.65:2283
}
ittools.domr.ovh:443,
@@ -128,13 +109,6 @@ journiv.domr.ovh {
reverse_proxy 192.168.1.65:8198
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
@@ -150,8 +124,6 @@ memos.home.domroese.eu:443 {
nas.domr.ovh,
nas.home.domroese.eu {
tls soenke@domroese.eu {
@@ -198,18 +170,6 @@ pihole.home.domroese.eu {
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
portracker.domr.ovh,
portracker.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -249,7 +209,6 @@ vault.home.domroese.eu:80 {
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu

View File

@@ -7,8 +7,6 @@ auth.home.domroese.eu {
}
beszel.domr.ovh,
beszel.home.domroese.eu {
tls soenke@domroese.eu
@@ -40,18 +38,6 @@ chartbrew.home.domroese.eu:4019 {
reverse_proxy 192.168.1.65:4019
}
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}
dashy.domr.ovh, #donetick
dashy.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -77,24 +63,12 @@ rss.home.domroese.eu {
}
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
haus.domr.ovh,
haus.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
}
homebox.domr.ovh,
homebox.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -108,10 +82,10 @@ homepage.home.domroese.eu:443 {
reverse_proxy 192.168.1.65:3891
}
huly.domr.ovh,
huly.home.domroese.eu {
immich.domr.ovh,
immich.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
reverse_proxy 192.168.1.65:2283
}
ittools.domr.ovh:443,
@@ -128,13 +102,6 @@ journiv.domr.ovh {
reverse_proxy 192.168.1.65:8198
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
@@ -150,8 +117,6 @@ memos.home.domroese.eu:443 {
nas.domr.ovh,
nas.home.domroese.eu {
tls soenke@domroese.eu {
@@ -198,18 +163,6 @@ pihole.home.domroese.eu {
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
portracker.domr.ovh,
portracker.home.domroese.eu:443 {
tls soenke@domroese.eu
@@ -249,7 +202,6 @@ vault.home.domroese.eu:80 {
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu

View File

@@ -1,5 +0,0 @@
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}

View File

@@ -1,21 +0,0 @@
services:
convertx:
image: ghcr.io/c4illin/convertx
container_name: convertx
restart: unless-stopped
ports:
- "3410:3000"
environment:
- JWT_SECRET=a1fd043661609d72a0447a1c2c1b1fc3ac6e1e610ef76af82c4239f59a512ae8f0b5e1d080011567a4b47bc27eeaa79e9653a8caa574957a575c91609ed881cfd96dd7dbed388d0dbada10787b00876d40415efd2f01d131b6de0b4f5e67ea55bf35d69b778aedde31c7f245972a352b713984ee63733d22ca9399940af70c3546b37d3afaa24158547238064b42a4aae9e283c3087a9742b6bda3401c2710bd138c4d90718726b7927c4f13cfbea2b55b85149360dc435257c4d16a31a7e5881806037d2f06c40e7bc5c5a1904a2c8e6c7e35998228fdf6be73b52c76aad82fb0f906d225503adda7e2aed65212b0cdca25c19182cb21957677c36a6c53cd46
volumes:
- /home/soenke/docker-data/convertx/data:/app/data
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.convertx.http.name: 'convertx'
kuma.convertx.http.url: 'https://convertx.domr.ovh'
kuma.convertx.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'

View File

@@ -1 +0,0 @@
ADMIN_PW_DAILYTXT="Diavid9600"

View File

@@ -1,5 +0,0 @@
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}

View File

@@ -1,36 +0,0 @@
services:
dailytxt:
image: phitux/dailytxt:latest
container_name: dailytxt
restart: always
env_file:
- .env
environment:
# That's the internal container-port. You can actually use any portnumber (must match with the one at 'ports')
- PORT=8317
- SECRET_KEY="O+EuLJXNAIxIT7puvNU5KVC4sh3JPRorTMCDRAkekho="
# Set it to False or remove the line completely to disallow registration of new users.
- ALLOW_REGISTRATION=True
# Use this if you want the json log file to be indented. Makes it easier to compare the files. Otherwise just remove this line!
- DATA_INDENT=2
      # Set after how many days the JWT token will expire and you have to re-login. Defaults to 30 days if line is omitted.
- JWT_EXP_DAYS=60
      # Enable/disable a feature of DailyTxT to auto-check maximal once per hour if there's a newer version of DailyTxT available. Defaults to True if line is omitted.
- ENABLE_UPDATE_CHECK=True
- ADMIN_PASSWORD=${ADMIN_PW_DAILYTXT}
ports:
- "8317:8765"
# perhaps you only want:
# "<host_port>:8765"
volumes:
- "/home/soenke/docker-data/dailytxt/:/app/data/"
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.work.tag.name: 'Work'
kuma.work.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.dailytxt.http.name: 'DailyTxT'
kuma.dailytxt.http.url: 'https://dailytxt.domr.ovh'
kuma.dailytxt.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'

View File

@@ -3,3 +3,4 @@ git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}

View File

@@ -107,9 +107,9 @@ services:
container_name: postgres_guacamole_compose
environment:
PGDATA: /var/lib/postgresql/data/guacamole
POSTGRES_DB: guacamole_db
POSTGRES_PASSWORD: 'ChooseYourOwnPasswordHere1234'
POSTGRES_USER: guacamole_user
POSTGRESQL_DB: guacamole_db
POSTGRESQL_PASSWORD: 'quvt2s1UQgZjhvCl5e8H2s6X8FPnoULO'
POSTGRESQL_USERNAME: guacamole_hans
image: postgres:15.2-alpine
networks:
- guacnetwork_compose
@@ -126,10 +126,18 @@ services:
- postgres
environment:
GUACD_HOSTNAME: guacd
POSTGRES_DATABASE: guacamole_db
POSTGRES_HOSTNAME: postgres
POSTGRES_PASSWORD: 'ChooseYourOwnPasswordHere1234'
POSTGRES_USER: guacamole_user
POSTGRESQL_DATABASE: guacamole_db
POSTGRESQL_HOSTNAME: postgres
POSTGRESQL_PASSWORD: 'quvt2s1UQgZjhvCl5e8H2s6X8FPnoULO'
POSTGRESQL_USERNAME: guacamole_hans
OPENID_AUTHORIZATION_ENDPOINT: https://auth.domr.ovh/application/o/authorize/
OPENID_CLIENT_ID: HampNyA8GqcnljpuJ4Cb9Y3FKPuy8JNUL50gS2Wi
OPENID_CLIENT_SECRET: lCjXz3FADrLNmvV9XZmqCeHEaMY7lxuZeWuo9NomJNT70s3XuNNOP2wMn96fi6cmBAWuIXSBF5nMjf4Mwu3FszAULV9pAWGbvIGV2cC7xgT1DhRVaiTVO1bVGGcaFPJ9
OPENID_ISSUER: https://auth.domr.ovh/application/o/guacamole/
OPENID_JWKS_ENDPOINT: https://auth.domr.ovh/application/o/guacamole/jwks/
OPENID_REDIRECT_URI: https://guac.domr.ovh/
OPENID_USERNAME_CLAIM_TYPE: preferred_username
OPENID_ENABLED: false
image: guacamole/guacamole
networks:
- guacnetwork_compose

View File

@@ -1,5 +0,0 @@
haus.domr.ovh,
haus.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
}

View File

@@ -1,9 +0,0 @@
services:
timesy:
image: ghcr.io/awwwsm/haus
logging:
options:
max-size: 1g
restart: always
ports:
- '8472:8080'

View File

@@ -1,2 +0,0 @@
SERVER_ADDRESS=https://huly.domr.ovh
HULY_VERSION=7

View File

@@ -1,5 +0,0 @@
huly.domr.ovh,
huly.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
}

View File

@@ -1,157 +0,0 @@
version: "3"
services:
mongodb:
image: "mongo:7-jammy"
container_name: mongodb
environment:
- PUID=1000
- PGID=1000
volumes:
- /home/soenke/docker-data/huly/db:/data/db
ports:
- 27017:27017
restart: unless-stopped
minio:
image: "minio/minio"
command: server /data --address ":9000" --console-address ":9001"
ports:
- 9000:9000
- 9001:9001
volumes:
- /home/soenke/docker-data/huly/files:/data
restart: unless-stopped
elastic:
image: "elasticsearch:7.14.2"
command: |
/bin/sh -c "./bin/elasticsearch-plugin list | grep -q ingest-attachment || yes | ./bin/elasticsearch-plugin install --silent ingest-attachment;
/usr/local/bin/docker-entrypoint.sh eswrapper"
volumes:
- /home/soenke/docker-data/huly/elastic:/usr/share/elasticsearch/data
ports:
- 9200:9200
environment:
- ELASTICSEARCH_PORT_NUMBER=9200
- BITNAMI_DEBUG=true
- discovery.type=single-node
- ES_JAVA_OPTS=-Xms1024m -Xmx1024m
- http.cors.enabled=true
- http.cors.allow-origin=http://localhost:8082
healthcheck:
interval: 20s
retries: 10
test: curl -s http://localhost:9200/_cluster/health | grep -vq '"status":"red"'
restart: unless-stopped
account:
image: hardcoreeng/account:${HULY_VERSION}
links:
- mongodb
- minio
ports:
- 3000:3000
environment:
- SERVER_PORT=3000
- SERVER_SECRET=secret
- MONGO_URL=mongodb://mongodb:27017
- TRANSACTOR_URL=ws://transactor:3333;ws://${SERVER_ADDRESS}:3333
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- FRONT_URL=http://front:8080
- INIT_WORKSPACE=demo-tracker
- MODEL_ENABLED=*
- ACCOUNTS_URL=http://${SERVER_ADDRESS}:3000
- ACCOUNT_PORT=3000
restart: unless-stopped
front:
image: hardcoreeng/front:${HULY_VERSION}
links:
- mongodb
- minio
- elastic
- collaborator
- transactor
ports:
- 8087:8080
environment:
- SERVER_PORT=8080
- SERVER_SECRET=secret
- ACCOUNTS_URL=http://${SERVER_ADDRESS}:3000
- REKONI_URL=http://${SERVER_ADDRESS}:4004
- CALENDAR_URL=http://${SERVER_ADDRESS}:8095
- GMAIL_URL=http://${SERVER_ADDRESS}:8088
- TELEGRAM_URL=http://${SERVER_ADDRESS}:8086
- UPLOAD_URL=/files
- ELASTIC_URL=http://elastic:9200
- COLLABORATOR_URL=ws://${SERVER_ADDRESS}:3078
- COLLABORATOR_API_URL=http://${SERVER_ADDRESS}:3078
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- MONGO_URL=mongodb://mongodb:27017
- TITLE=Huly Self Hosted
- DEFAULT_LANGUAGE=en
- LAST_NAME_FIRST=true
restart: unless-stopped
collaborator:
image: hardcoreeng/collaborator:${HULY_VERSION}
links:
- mongodb
- minio
- transactor
ports:
- 3078:3078
environment:
- COLLABORATOR_PORT=3078
- SECRET=secret
- ACCOUNTS_URL=http://account:3000
- UPLOAD_URL=/files
- MONGO_URL=mongodb://mongodb:27017
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
restart: unless-stopped
transactor:
image: hardcoreeng/transactor:${HULY_VERSION}
links:
- mongodb
- elastic
- minio
- rekoni
- account
ports:
- 3333:3333
environment:
- SERVER_PORT=3333
- SERVER_SECRET=secret
- SERVER_CURSOR_MAXTIMEMS=30000
- ELASTIC_URL=http://elastic:9200
- ELASTIC_INDEX_NAME=huly_storage_index
- MONGO_URL=mongodb://mongodb:27017
- METRICS_CONSOLE=false
- METRICS_FILE=metrics.txt
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- REKONI_URL=http://rekoni:4004
- FRONT_URL=http://${SERVER_ADDRESS}:8087
- SERVER_PROVIDER=ws
- ACCOUNTS_URL=http://account:3000
- LAST_NAME_FIRST=true
- UPLOAD_URL=http://${SERVER_ADDRESS}/files
restart: unless-stopped
rekoni:
image: hardcoreeng/rekoni-service:${HULY_VERSION}
ports:
- 4004:4004
environment:
- SECRET=secret
deploy:
resources:
limits:
memory: 500M
restart: unless-stopped
volumes:
db:
files:
elastic:
etcd:

23
immich/.env Normal file
View File

@@ -0,0 +1,23 @@
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/synology/immich/library
# The location where your database files are stored. Network shares are not supported for the database
DB_DATA_LOCATION=/home/soenke/docker-data/immich/postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v2.1.0"
IMMICH_VERSION=v2
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=Mb1K7TmwtDeN7Cg7v6zPGwGNYnWti9x5
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

View File

@@ -1,5 +1,5 @@
immich.domr.ovh,
immich.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:2283
}
reverse_proxy 192.168.1.65:2283
}

77
immich/docker-compose.yml Normal file
View File

@@ -0,0 +1,77 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:9@sha256:546304417feac0874c3dd576e0952c6bb8f06bb4093ea0c9ca303c73cf458f63
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
healthcheck:
disable: false
volumes:
model-cache:

View File

@@ -1,5 +0,0 @@
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}

View File

@@ -1,43 +0,0 @@
services:
kopia:
image: kopia/kopia:latest
hostname: Hostname
container_name: Kopia
restart: unless-stopped
ports:
- 51515:51515
# Setup the server that provides the web gui
command:
- server
- start
- --disable-csrf-token-checks
- --insecure
- --address=0.0.0.0:51515
- --server-username=Soenke
- --server-password=Diavid9600
environment:
# Set repository password
KOPIA_PASSWORD: "Diavid9600!9600"
USER: "Soenke"
volumes:
# Mount local folders needed by kopia
- /home/soenke/docker-data/kopia/config:/app/config
- /home/soenke/docker-data/kopia/cache:/app/cache
- /home/soenke/docker-data/kopia/logs:/app/logs
# Mount local folders to snapshot
- /home/soenke/docker-data/kopia/data:/data:ro
# Mount repository location
- /home/soenke/docker-data/kopia/repository:/repository
      # Mount path for browsing mounted snapshots
- /home/soenke/docker-data/kopia/tmp:/tmp:shared
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.kopia.http.name: 'kopia'
kuma.kopia.http.url: 'https://kopia.domr.ovh/repo'
kuma.kopia.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'

View File

@@ -40,7 +40,7 @@ services:
MAX_WORKERS: 1
WEB_CONCURRENCY: 1
ALLOW_SIGNUP: true
BASE_URL: https://mealie.home.domroese.eu
BASE_URL: https://mealie.domr.ovh
DB_ENGINE: postgres
POSTGRES_USER: mealie
POSTGRES_PASSWORD: "$hYx%uyO$IAUX3EhXvUtP$GMe4TLgoiLrBTN9nrXh&q8C0TWqp&ku%dEOUPT4GMZ"
@@ -55,10 +55,10 @@ services:
SMTP_USER: ${SYSTEM_EMAIL_USER}
SMTP_PASSWORD: ${SYSTEM_EMAIL_PASSSWORD}
OIDC_AUTH_ENABLED: true
OIDC_PROVIDER_NAME: auth.domr.ovh
OIDC_CONFIGURATION_URL: https://auth.home.domroese.eu/application/o/mealie/.well-known/openid-configuration
OIDC_CLIENT_ID: oVmVbL9Ehd1KAjSgAseAMZw4LHV6gmUfsFEf2Akp
OIDC_CLIENT_SECRET: WP2hs4qKjmEpKQabIvKCBgDwtlm534It526vs3Mg9lrBGgzswG9sCh0nw7ieW9y7D7OMRe0x2gkcHqcdP37LVMBgpR3f2rABSlOduhyZhPQKOUNBk79AQNxYr23Mdaud
OIDC_PROVIDER_NAME: Domröse
OIDC_CONFIGURATION_URL: https://auth.domr.ovh/application/o/mealie/.well-known/openid-configuration
OIDC_CLIENT_ID: "oVmVbL9Ehd1KAjSgAseAMZw4LHV6gmUfsFEf2Akp"
OIDC_CLIENT_SECRET: "WP2hs4qKjmEpKQabIvKCBgDwtlm534It526vs3Mg9lrBGgzswG9sCh0nw7ieW9y7D7OMRe0x2gkcHqcdP37LVMBgpR3f2rABSlOduhyZhPQKOUNBk79AQNxYr23Mdaud"
OIDC_SIGNUP_ENABLED: true
OIDC_USER_GROUP: mealie-users
OIDC_ADMIN_GROUP: mealie-admins

Binary file not shown.

Binary file not shown.

View File

@@ -1 +0,0 @@
/var/run/mysqld/mysqld.sock

Binary file not shown.

Binary file not shown.

View File

View File

@@ -1,11 +0,0 @@
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}

View File

@@ -1,33 +0,0 @@
name: plant-it
services:
server:
image: msdeluise/plant-it-server:latest
env_file: server.env
depends_on:
- db
volumes:
- "/home/soenke/docker-data/plantit/upload-dir:/upload-dir"
ports:
- "8632:8080"
- "3632:3000"
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.plantit.http.name: 'plantit'
kuma.plantit.http.url: 'https://plantit.domr.ovh/'
kuma.plantit.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
kuma.plantitapi.http.name: 'plantit api'
kuma.plantitapi.http.url: 'https://api.plantit.domr.ovh/'
kuma.plantitapi.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'
db:
image: mysql:8.0
restart: always
env_file: server.env
volumes:
- "/home/soenke/docker-data/plantit/db:/var/lib/mysql"

View File

@@ -1,51 +0,0 @@
#
# DB
#
MYSQL_HOST=db
MYSQL_PORT=3306
MYSQL_USERNAME=root
MYSQL_PSW=root
MYSQL_DATABASE=bootdb
MYSQL_ROOT_PASSWORD=SuSePaWo
#
# Server config
#
USERS_LIMIT=-1 # less than 0 means no limit
UPLOAD_DIR=/upload-dir # path to the directory used to store uploaded images, if on docker deployment leave as it is and change the volume binding in the docker-compose file if needed
API_PORT=8080
FLORACODEX_URL=https://api.floracodex.com
FLORACODEX_KEY=CzugP80KW7ujgxexVwD9ejA7RIrZT6AoORctImt0XgUS-5MdC1lmY02vbg7c5DJ4
ALLOWED_ORIGINS=* # CORS allowed origins (comma separated list)
LOG_LEVEL=DEBUG # could be: DEBUG, INFO, WARN, ERROR
REMINDER_NOTIFY_CHECK=0 30 7 * * * # 6-values crontab expression to set the check time for reminders
MAX_REQUESTS_PER_MINUTE=100 # rate limiting of the upcoming requests
#
# Notification
#
NTFY_ENABLED=true # if "false" ntfy service won't be available as notification dispatcher
GOTIFY_ENABLED=true # if "false" ntfy service won't be available as notification dispatcher
#
# Cache
#
CACHE_TYPE=none # Cache type. By default, it's "redis" but can also be "none"
#CACHE_TTL=86400
#CACHE_HOST=cache
#CACHE_PORT=6379
#CACHE_PASSWORD=
#CACHE_USERNAME=
#
# JWT
#
JWT_SECRET=6a837ed16d20acb8b0af7c7d5c16725d58da86e89649eb857293e2995f0ae09a8c85694476fe32e46d25c612af9ade6e2374a4b93ff0348d1529a8ce583b3520fb380ab69e12f80097332835cd30c605612b475c6d675d47c1616289790cb2a8b268dd8a08150b8a7681a72689e650877c010ff700906df1b539ca4f651a1276a62e2d2c199819a8f7ded0291b58b6668fd280d1ab5258329362f64a37786cd1801e0286cd812234c48d85beb84ce51f70d0f4c23ebd49646377c1cc9b29b050080209699fc517f7deaa960279d86b8c2ca28cfe2d5a31a24713145ca5434c940e75455bc1b99d4fbe9569b3d80887b45d4ebb4107879567fce3f2c5ca959189
JWT_EXP=1
#
# Cache
#
ALLOWED_ORIGINS=*
SSL_ENABLED=false

View File

Binary file not shown.