Compare commits

...

13 Commits

17 changed files with 379 additions and 36 deletions

View File

@@ -30,7 +30,7 @@ client {
    path = "/hdd/gitea/"
    read_only = false
  }
  host_volume "minio-ssd" {
    path = "/ssd/minio/"
    read_only = false
@@ -50,9 +50,33 @@ client {
    path = "/ssd/sqlite/"
    read_only = false
  }
+  host_volume "jellyfinCache" {
+    path = "/hdd/multimedia/cache/"
+    read_only = false
+  }
+  host_volume "jellyfinConfig" {
+    path = "/hdd/multimedia/config/"
+    read_only = false
+  }
+  host_volume "media" {
+    path = "/hdd/multimedia/media/"
+    read_only = false
+  }
+}
+plugin "docker" {
+  config {
+    allow_privileged = true
+    volumes {
+      enabled = true
+    }
+  }
}
ui {
  # Comment to disable UI, it listens on port 4646
  enabled = true
}

View File

@@ -5,8 +5,8 @@
    - name: Enable community packages
      ansible.builtin.lineinfile:
        path: /etc/apk/repositories
-        regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-        line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+        regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+        line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
        state: present
    - name: Update apk packages

View File

@@ -5,8 +5,8 @@
    - name: Enable community packages
      ansible.builtin.lineinfile:
        path: /etc/apk/repositories
-        regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-        line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+        regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+        line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
        state: present
    - name: Update apk packages

View File

@@ -5,8 +5,8 @@
    - name: Enable community packages
      ansible.builtin.lineinfile:
        path: /etc/apk/repositories
-        regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-        line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+        regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+        line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
        state: present
    - name: Update apk packages

View File

@@ -1,12 +1,12 @@
-- name: Install Caddy on Alpine Linux
+- name: Install Traefik on Alpine Linux
  hosts: all
  tasks:
    - name: Enable community packages
      ansible.builtin.lineinfile:
        path: /etc/apk/repositories
-        regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-        line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+        regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+        line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
        state: present
    - name: Update apk packages

View File

@@ -24,7 +24,7 @@ job "penpot-standalone" {
      port = "ingress"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot-standalone.tls=true",
        "traefik.http.routers.penpot-standalone.entrypoints=websecure",
        "traefik.http.routers.penpot-standalone.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
@@ -65,7 +65,7 @@ job "penpot-standalone" {
      driver = "docker"
      config {
-        image = "penpotapp/frontend:1.19.3"
+        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }
@@ -111,7 +111,7 @@ job "penpot-standalone" {
            local_bind_address = "127.0.0.1"
            local_bind_port = 1025
          }
        }
        tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
      }
    }
@@ -120,7 +120,7 @@ job "penpot-standalone" {
    task "backend" {
      driver = "docker"
      config {
-        image = "penpotapp/backend:1.19.3"
+        image = "penpotapp/backend:2.0.1"
        ports = ["ingress"]
      }
@@ -162,10 +162,10 @@ job "penpot-standalone" {
      mode = "bridge"
    }
    task "exporter" {
      driver = "docker"
      config {
-        image = "penpotapp/exporter:1.19.3"
+        image = "penpotapp/exporter:2.0.1"
      }
      env {
@@ -187,13 +187,13 @@ job "penpot-standalone" {
            local_bind_address = "127.0.0.1"
            local_bind_port = 6379
          }
        }
        tags = ["traefik.enable=false"] # Hide envoy from traefik
      }
    }
  }
}
  // penpot-standalone-postgres
  group "postgres" {
    count = 1
@@ -238,7 +238,7 @@ job "penpot-standalone" {
  // penpot-standalone-redis-cache
  group "redis" {
    count = 1
    network {
      mode = "bridge"
      port "redis" {
@@ -268,12 +268,12 @@ job "penpot-standalone" {
        ports = ["redis"]
      }
    }
  }
  // penpot-standalone-fake-smtp
  group "mailcatcher" {
    count = 1
    network {
      mode = "bridge"
      port "webUI" {

View File

@@ -0,0 +1,3 @@
FROM gitea/act_runner:0.2.10-dind-rootless
USER root

View File

@@ -14,7 +14,7 @@ job "gitea" {
    network {
      mode = "bridge"
      port "ingress" {
-        to = 3000
+        static = 3000
      }
    }
@@ -59,7 +59,7 @@ job "gitea" {
      driver = "docker"
      config {
-        image = "gitea/gitea:1.21.7"
+        image = "gitea/gitea:1.22.1"
        ports = ["ingress"]
      }
@@ -67,6 +67,11 @@ job "gitea" {
        volume = "gitea-data"
        destination = "/data"
      }
+      resources {
+        cpu = 3000
+        memory = 2000
+      }
    }
  }
}

View File

@@ -24,4 +24,29 @@ If you want to deploy this, you will need to verify you have a valid host volume
## Configuring Gitea
There is no need to embed secrets in the Nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Gitea. Postgres should be mounted to the container on the standard `5432` port, so you can select postgres as the database type, use `127.0.0.1:5432` as the address, and input the username, password, and database name you created for Gitea to use.
If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).
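The `127.0.0.1:5432` address works because the group's service binds a Consul Connect upstream to the Postgres service. The exact stanza is not shown in this compare, but it follows the same pattern used by the other job specs in this repo (for example the Umami job), roughly:

```hcl
connect {
  sidecar_service {
    proxy {
      upstreams {
        destination_name   = "postgres"
        local_bind_address = "127.0.0.1"
        local_bind_port    = 5432
      }
    }
  }
}
```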
## Adding CI/CD
Gitea maintains a fork of act runner that can be used to run GitHub Actions workflows. To deploy it with Nomad, you will need to leverage Docker in Docker (DinD) with privileged mode enabled in Docker, or pay for Docker's Business plan to get better app isolation. The default runner image provided by Gitea was failing to start the DinD daemon, so I included a Dockerfile that runs the container as the root user.
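For reference, the Dockerfile added in this change is just the upstream rootless DinD runner image with the user switched to root:

```dockerfile
# Run the rootless DinD act runner image as root so the DinD daemon can start under Nomad.
FROM gitea/act_runner:0.2.10-dind-rootless
USER root
```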
1. Build Image
```bash
docker build --network host --platform linux/amd64 -t <your_gitea_domain>/caleb/nomad_act_runner:0.0.1 .
```
> [!NOTE]
> You may not need to specify the platform flag. If you use Apple Silicon but deploy to X86, you will need to include the flag.
2. Push Image
```bash
docker push <your_gitea_domain>/caleb/nomad_act_runner:0.0.1
```
3. Run the Nomad job with the Gitea runner registration token
```bash
nomad job run -var "grt=<your_token>" -var "domain=<gitea_domain>" runner.nomad.hcl
```
> [!NOTE]
> If you prefer not to use CLI variables, you can edit the `locals` block at the top of the Nomad job spec and hardcode the values there, as sketched below.
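A minimal sketch of that approach (the token and domain below are placeholders, not real values):

```hcl
locals {
  GITEA_RUNNER_TOKEN = "paste-your-registration-token" # instead of var.grt
  GITEA_DOMAIN       = "gitea.example.com"             # instead of var.domain
  GITEA_RUNNER_NAME  = "${NOMAD_TASK_NAME}-${NOMAD_ALLOC_INDEX}"
}
```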

View File

@@ -0,0 +1,71 @@
variable "grt" {
type = string
description = "Gitea runner token"
}
variable "domain" {
type = string
description = "Gitea Domain Name"
}
locals {
GITEA_RUNNER_TOKEN = var.grt # Replace with raw token surrounded by quotes if you don't want to pass via cli or using web ui
GITEA_DOMAIN = var.domain # Replace with domain surrounded by quotes if you don't want to pass via cli or using web ui
GITEA_RUNNER_NAME = "${NOMAD_TASK_NAME}-${NOMAD_ALLOC_INDEX}"
}
job "gitea-runner" {
datacenters = ["dc1"]
type = "service"
group "application" {
count = 1
scaling {
enabled = true
min = 1
max = 5
}
network {
mode = "bridge"
}
service {
name = "gitea-runner"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "gitea"
local_bind_address = "127.0.0.1"
local_bind_port = 3000
}
}
tags = ["traefik.enable=false"] # Hide envoy from traefik
}
}
}
task "gitea-runner" {
driver = "docker"
config {
image = "${local.GITEA_DOMAIN}/caleb/nomad_act_runner:0.0.1"
privileged = true
}
env = {
GITEA_INSTANCE_URL="http://${NOMAD_UPSTREAM_ADDR_gitea}"
GITEA_RUNNER_REGISTRATION_TOKEN="${local.GITEA_RUNNER_TOKEN}"
GITEA_RUNNER_NAME="${local.GITEA_RUNNER_NAME}"
}
resources {
cpu = 2000
memory = 2000
}
}
}
}

View File

@@ -0,0 +1,89 @@
locals {
SUBDOMAIN = "jellyfin." // End with dot or leave blank for root domain
DOMAIN = "example.com"
TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
job "jellyfin" {
datacenters = ["dc1"]
type = "service"
group "application" {
count = 1
network {
mode = "host"
port "httpIngress" { static = 8096 }
port "serviceDiscovery" { static = 1900 }
port "clientDiscovery" { static = 7359 }
}
volume "jellyfin-cache" {
type = "host"
source = "jellyfinCache"
}
volume "jellyfin-config" {
type = "host"
source = "jellyfinConfig"
}
volume "jellyfin-data" {
type = "host"
source = "media"
}
service {
name = "jellyfin"
port = "httpIngress"
tags = [
"traefik.enable=true",
"traefik.http.routers.jellyfin.tls=true",
"traefik.http.routers.jellyfin.entrypoints=websecure",
"traefik.http.routers.jellyfin.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
]
check {
type = "http"
path = "/health"
interval = "10s"
timeout = "2s"
}
}
task "jellyfin" {
driver = "docker"
config {
image = "jellyfin/jellyfin:2024080505"
ports = ["httpIngress", "serviceDiscovery", "clientDiscovery"]
}
env = {
JELLYFIN_PublishedServerUrl="${local.TRAEFIK_DOMAIN}"
}
volume_mount {
volume = "jellyfin-cache"
destination = "/cache"
}
volume_mount {
volume = "jellyfin-config"
destination = "/config"
}
volume_mount {
volume = "jellyfin-data"
destination = "/media"
}
resources {
cpu = 2000
memory = 1024
memory_max = 2048
}
}
}
}

View File

@@ -0,0 +1,36 @@
# Jellyfin
Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media. It is an alternative to the proprietary Emby and Plex, to provide media from a dedicated server to end-user devices via multiple apps.
## Nomad Job for Jellyfin
You will need to modify the job spec items listed under [TODO](./readme.md#TODO), but there are no Jellyfin-specific adjustments needed. Once running, it will register with Consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Jellyfin setup page and make the needed configuration changes, such as defining the media libraries.
## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Cache
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Config
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Media
## TODO
If you want to deploy this, you will need to verify you have the necessary host volumes.
| Line | Default | Adjustment |
| ---- | --------------------------- | --------------------------------------------------------------------------- |
| 23 | `source = "jellyfinCache"` | Change `jellyfinCache` to a valid host volume name |
| 28 | `source = "jellyfinConfig"` | Change `jellyfinConfig` to a valid host volume name |
| 33 | `source = "media"` | Change `media` to a valid host volume name |
| 68 | `volume = "jellyfinCache"` | Change `jellyfinCache` to the host volume defined on line 21 if applicable |
| 74 | `volume = "jellyfinConfig"` | Change `jellyfinConfig` to the host volume defined on line 26 if applicable |
| 79 | `volume = "media"` | Change `media` to the host volume defined on line 31 if applicable |
> To make the instance accessible through Traefik, you will need to define the domain to listen on by setting the values on lines 2 and 3, as shown below.
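Those values are the `SUBDOMAIN` and `DOMAIN` locals at the top of the job spec; for example, with a placeholder domain:

```hcl
locals {
  SUBDOMAIN      = "jellyfin." // End with a dot, or leave blank for the root domain
  DOMAIN         = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
```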
## Configuring Jellyfin
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Jellyfin.
> I recommend using a single root directory for media and then creating subdirectories for each type of media, as in the example below. This will make it easier to manage the media via SFTP and to configure Jellyfin.
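For example, a hypothetical layout under the `media` host volume (the subdirectory names are up to you):

```
/hdd/multimedia/media/
├── movies/
├── shows/
└── music/
```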
> If this is deployed on Alpine Linux, you won't be able to pass through dedicated NVIDIA hardware because `nvidia-container-toolkit` is not available for musl-based systems. You will need to use a different root operating system, such as Ubuntu, if you want hardware acceleration with NVIDIA hardware.

View File

@@ -62,7 +62,7 @@ job "penpot" {
      port = "ingress"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot.tls=true",
        "traefik.http.routers.penpot.entrypoints=websecure",
        "traefik.http.routers.penpot.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
@@ -103,7 +103,7 @@ job "penpot" {
      driver = "docker"
      config {
-        image = "penpotapp/frontend:1.19.3"
+        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }
@@ -153,7 +153,7 @@ job "penpot" {
            local_bind_address = "127.0.0.1"
            local_bind_port = 1025
          }
        }
        tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
      }
    }
@@ -162,7 +162,7 @@ job "penpot" {
    task "backend" {
      driver = "docker"
      config {
-        image = "penpotapp/backend:1.19.3"
+        image = "penpotapp/backend:2.0.1"
        ports = ["ingress"]
      }
@@ -207,10 +207,10 @@ job "penpot" {
      mode = "bridge"
    }
    task "exporter" {
      driver = "docker"
      config {
-        image = "penpotapp/exporter:1.19.3"
+        image = "penpotapp/exporter:2.0.1"
      }
      env {
@@ -232,10 +232,10 @@ job "penpot" {
            local_bind_address = "127.0.0.1"
            local_bind_port = 6379
          }
        }
        tags = ["traefik.enable=false"] # Hide envoy from traefik
      }
    }
  }
}
}

View File

@@ -0,0 +1,90 @@
# Listening Domain
locals {
SUBDOMAIN = "umami." // End with dot or leave blank for root domain
DOMAIN = "example.com"
TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
// OP is the 1Password CLI
locals {
OP_DB_USER = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresUsername"
OP_DB_PASSWORD = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresPassword"
OP_AppSecret = "op://InfraSecrets/Umami/ENV_SECRETS/AppSecret"
}
locals {
USER_PASSWORD = "${local.OP_DB_USER}:${local.OP_DB_PASSWORD}"
UMAMI_APPSECRET = "${local.OP_AppSecret}"
UMAMI_DB_URL = "postgresql://${local.USER_PASSWORD}@127.0.0.1:5432/umami"
UMAMI_DB_TYPE = "postgresql"
}
job "umami" {
datacenters = ["dc1"]
type = "service"
group "application" {
count = 1
network {
mode = "bridge"
port "httpIngress" {
to = 3000
}
}
service {
name = "umami"
port = "httpIngress"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_address = "127.0.0.1"
local_bind_port = 5432
}
}
tags = ["traefik.enable=false"] # Hide envoy from traefik
}
}
tags = [
"traefik.enable=true",
"traefik.http.routers.umami.tls=true",
"traefik.http.routers.umami.entrypoints=websecure",
"traefik.http.routers.umami.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
]
check {
type = "http"
path = "/api/heartbeat"
interval = "10s"
timeout = "2s"
}
}
task "umami" {
driver = "docker"
config {
image = "ghcr.io/umami-software/umami:postgresql-latest"
ports = ["httpIngress"]
}
env = {
DATABASE_URL="${local.UMAMI_DB_URL}"
DATABASE_TYPE="${local.UMAMI_DB_TYPE}"
APP_SECRET="${local.UMAMI_APPSECRET}"
}
resources {
cpu = 1000
memory = 512
memory_max = 1024
}
}
}
}

View File

@@ -5,7 +5,7 @@
    - name: Update consul config
      ansible.builtin.copy:
        mode: preserve
-        src: ./host_config/consul.hcl
+        src: ../host_config/consul.hcl
        dest: /etc/consul/server.hcl
    - name: Restart consul service

View File

@@ -5,7 +5,7 @@
    - name: Update nomad config
      ansible.builtin.copy:
        mode: preserve
-        src: ./host_config/nomad.hcl
+        src: ../host_config/nomad.hcl
        dest: /etc/nomad.d/server.hcl
    - name: Restart nomad service

View File

@@ -5,7 +5,7 @@
    - name: Update traefik config
      ansible.builtin.copy:
        mode: preserve
-        src: ./host_config/traefik.yml
+        src: ../host_config/traefik.yml
        dest: /etc/traefik/traefik.yaml # Alpine default config is yaml
    - name: Restart traefik service