Compare commits

..

26 Commits

Author SHA1 Message Date
696813fc72 Update Gitea version and fix typo 2024-08-15 15:44:18 -07:00
d099c4dcc2 Add support for Gitea Actions 2024-08-15 05:44:43 -07:00
177d7b480c Reduce default resources allotment 2024-08-15 04:30:18 -07:00
641beb1f81 Add Umami Job Spec 2024-08-14 19:49:28 -07:00
0e5712a05e Provision more Gitea Compute and Ram for mirroring large repos 2024-08-12 16:30:21 -07:00
318c8e5ae1 Fix typo and update Jellyfin Version 2024-08-07 03:49:12 -07:00
75fcfb90bf Merge pull request 'Update Penpot to 2.0.1' (#1) from update-penpot-version into main
Reviewed-on: #1
2024-04-21 11:32:47 +00:00
41c1543979 Update Penpot to 2.0.1 2024-04-21 04:26:23 -07:00
8663b87f0f Give Additional Compute Resources To Stop Crashes 2024-03-12 15:16:43 -07:00
d4e2b4a241 Add Jellyfin Service 2024-03-05 18:58:58 -08:00
695acaa48a Fix File Routing Error 2024-03-05 16:42:58 -08:00
71c46ad953 Change Formatting To Zed Editor's Default 2024-03-05 13:20:55 -08:00
fdacbec5ea Fix Playbook Title Typo 2024-03-05 10:18:16 -08:00
16bb096e34 Refactor Gitea to Set Domain At Top Of File 2024-02-28 16:26:55 -08:00
49a25fff41 Update Gitea Version to 1.21.7 - No Known Breaking Changes 2024-02-28 15:56:29 -08:00
829bb876e0 Change Domain Configuration To Better Support Host Domain Declaration 2024-02-26 20:19:56 -08:00
01337c1e84 Add Penpot Standalone For Easily Deploying Penpot Demo 2024-02-26 19:57:53 -08:00
fca39e54b3 Remove Not Needed Port Labels 2024-02-26 17:25:00 -08:00
9c64bb856f Fix Penpot Exporter From Hitting Bad Gateway 2024-02-26 17:17:18 -08:00
927d57e9c9 Fix Loading Previews in Penpot 2024-02-26 16:40:51 -08:00
149fd307f2 Add Penpot App 2024-02-23 04:32:56 -08:00
b686486c93 Add DomainRedirect Service 2024-02-20 14:51:36 -08:00
8428e1a269 Add Mailcatcher Service 2024-02-20 13:17:48 -08:00
7509ca056e Add Minio Service 2024-02-06 20:27:30 -08:00
6926e74b8a Fix Hyperlinks in Markdown Documenation 2024-02-06 18:10:40 -08:00
0197e47292 Removed Failed Comment Prompt 2024-02-06 17:15:17 -08:00
29 changed files with 1312 additions and 35 deletions

View File

@@ -50,5 +50,3 @@ Be aware, that if you have an error it could cause the service to fail to start
### Nomad Jobs ### Nomad Jobs
This is where the nomad job specs are stored. You can learn more about the job specs in the [nomad_jobs readme](./nomad_jobs/README.md). This is where the nomad job specs are stored. You can learn more about the job specs in the [nomad_jobs readme](./nomad_jobs/README.md).

View File

@@ -30,7 +30,7 @@ client {
path = "/hdd/gitea/" path = "/hdd/gitea/"
read_only = false read_only = false
} }
host_volume "minio-ssd" { host_volume "minio-ssd" {
path = "/ssd/minio/" path = "/ssd/minio/"
read_only = false read_only = false
@@ -50,9 +50,33 @@ client {
path = "/ssd/sqlite/" path = "/ssd/sqlite/"
read_only = false read_only = false
} }
host_volume "jellyfinCache" {
path = "/hdd/multimedia/cache/"
read_only = false
}
host_volume "jellyfinConfig" {
path = "/hdd/multimedia/config/"
read_only = false
}
host_volume "media" {
path = "/hdd/multimedia/media/"
read_only = false
}
}
plugin "docker" {
config {
allow_privileged = true
volumes {
enabled = true
}
}
} }
ui { ui {
# Comment to disable UI, it listens on port 4646 # Comment to disable UI, it listens on port 4646
enabled = true enabled = true
} }

View File

@@ -5,8 +5,8 @@
- name: Enable community packages - name: Enable community packages
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/apk/repositories path: /etc/apk/repositories
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community' regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community' line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
state: present state: present
- name: Update apk packages - name: Update apk packages

View File

@@ -5,8 +5,8 @@
- name: Enable community packages - name: Enable community packages
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/apk/repositories path: /etc/apk/repositories
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community' regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community' line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
state: present state: present
- name: Update apk packages - name: Update apk packages

View File

@@ -5,8 +5,8 @@
- name: Enable community packages - name: Enable community packages
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/apk/repositories path: /etc/apk/repositories
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community' regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community' line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
state: present state: present
- name: Update apk packages - name: Update apk packages

View File

@@ -1,12 +1,12 @@
- name: Install Caddy on Alpine Linux - name: Install Traefik on Alpine Linux
hosts: all hosts: all
tasks: tasks:
- name: Enable community packages - name: Enable community packages
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/apk/repositories path: /etc/apk/repositories
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community' regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community' line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
state: present state: present
- name: Update apk packages - name: Update apk packages

View File

@@ -1,5 +1,3 @@
// Write me a cheatsheet for ZFS
# ZFS Cheatsheet # ZFS Cheatsheet
| Command | Description | | Command | Description |

View File

@@ -1,6 +1,6 @@
# Nomad Job Specs # Nomad Job Specs
This directory contains two sub-directories: `apps` and `services`. If you are going to ignore this README you should at least read how this repository [Manages Secrets](#Managing%20Secrets). This directory contains two sub-directories: `apps` and `services`. If you are going to ignore this README you should at least read how this repository [Manages Secrets](#managing-secrets).
## Apps Directory ## Apps Directory
@@ -34,9 +34,9 @@ op inject -i postgres.nomad.hcl -o postgres.nomad.hcl.secret
| Service | Description by LLM | Service | App Spec | | Service | Description by LLM | Service | App Spec |
| --- | --- | --- | -- | | --- | --- | --- | -- |
| Caddy | Caddy is a web server and reverse proxy with automatic HTTPS written in Go. | [Service Readme](./services/caddy/README.md) | | Caddy | Caddy is a web server and reverse proxy with automatic HTTPS written in Go. | [Service Readme](./services/caddy/readme.md) |
| Gitea | Gitea is a self-hosted Git service written in Go. | [Service Readme](./services/gitea/README.md) | [App Spec](./apps/gitea-standalone.nomad.hcl) | | Gitea | Gitea is a self-hosted Git service written in Go. | [Service Readme](./services/gitea/readme.md) | [App Spec](./apps/gitea-standalone.nomad.hcl) |
| Minio | MinIO is a high performance object storage server compatible with Amazon S3 APIs | [Service Readme](./services/minio/README.md) | | | Minio | MinIO is a high performance object storage server compatible with Amazon S3 APIs | [Service Readme](./services/minio/readme.md) | |
| Penpot | Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Non dependent on operating systems, Penpot is web based and works with open web standards (SVG). For all and empowered by the community. | [Service Readme](./services/penpot/README.md) | [App Spec](./apps/penpot-standalone.nomad.hcl) | | Penpot | Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Non dependent on operating systems, Penpot is web based and works with open web standards (SVG). For all and empowered by the community. | [Service Readme](./services/penpot/readme.md) | [App Spec](./apps/penpot-standalone.nomad.hcl) |
| Postgres | PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. | [Service Readme](./services/postgres/README.md) | | | Postgres | PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. | [Service Readme](./services/postgres/readme.md) | |
| Redis | Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker. | [Service Readme](./services/redis/README.md) | | | Redis | Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker. | [Service Readme](./services/redis/readme.md) | |

View File

@@ -0,0 +1,306 @@
locals {
  TRAEFIK_DOMAIN    = "penpot.example.local"
  PENPOT_PUBLIC_URI = "https://${local.TRAEFIK_DOMAIN}"
  PENPOT_SECRET_KEY = "op://InfraSecrets/7hbsxng22unjqc4wkj62qniu2u/credential" # Try running `openssl rand -hex 32` to generate a random secret key
  PENPOT_FLAGS      = "enable-demo-users"
}

# Self-contained Penpot deployment: frontend, backend, exporter, postgres,
# redis and a fake-SMTP catcher, wired together over the Consul service mesh.
# Only the frontend is exposed through Traefik; every other service is
# mesh-only and hidden with `traefik.enable=false`.
job "penpot-standalone" {
  datacenters = ["dc1"]

  group "frontend" {
    count = 1
    network {
      mode = "bridge"
      port "ingress" {
        to = 80
      }
    }
    # Expose frontend to internet through traefik
    service {
      name = "penpot-standalone"
      port = "ingress"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot-standalone.tls=true",
        "traefik.http.routers.penpot-standalone.entrypoints=websecure",
        "traefik.http.routers.penpot-standalone.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
      ]
      connect {
        sidecar_service {
          proxy {
            # Bind mesh upstreams on localhost so the frontend can reach them
            # with plain 127.0.0.1 URIs (see the task env below).
            upstreams {
              destination_name   = "penpot-standalone-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6060
            }
            upstreams {
              destination_name   = "penpot-standalone-exporter"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6061
            }
            upstreams {
              destination_name   = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 9000
            }
          }
          tags = ["traefik.enable=false"] # Hide sidecar from traefik
        }
      }
      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }
    task "frontend" {
      driver = "docker"
      config {
        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }
      env {
        PENPOT_PUBLIC_URI   = local.PENPOT_PUBLIC_URI
        PENPOT_BACKEND_URI  = "http://127.0.0.1:6060" # via sidecar upstream
        PENPOT_EXPORTER_URI = "http://127.0.0.1:6061" # via sidecar upstream
        PENPOT_FLAGS        = local.PENPOT_FLAGS
      }
    }
  }

  // penpot-standalone-backend
  group "backend" {
    network {
      mode = "bridge"
    }
    service {
      # Make available to other services by the 'penpot-standalone-backend' name
      name = "penpot-standalone-backend"
      port = "6060"
      tags = ["traefik.enable=false"] # Hide backend from traefik
      # Make available through the consul service mesh
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-standalone-postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
            upstreams {
              destination_name   = "penpot-standalone-redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
            upstreams {
              destination_name   = "penpot-standalone-fake-smtp"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 1025
            }
          }
          tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
        }
      }
    }
    task "backend" {
      driver = "docker"
      config {
        image = "penpotapp/backend:2.0.1"
        # NOTE(review): this group's network stanza defines no "ingress" port
        # label, so this reference looks stale (copied from the frontend task)
        # — confirm whether Nomad accepts it or the line should be removed.
        ports = ["ingress"]
      }
      env {
        PENPOT_PUBLIC_URI        = local.PENPOT_PUBLIC_URI
        PENPOT_SECRET_KEY        = local.PENPOT_SECRET_KEY
        PENPOT_FLAGS             = local.PENPOT_FLAGS
        PENPOT_DATABASE_URI      = "postgresql://127.0.0.1:5432/penpot"
        PENPOT_DATABASE_USERNAME = "penpot"
        PENPOT_DATABASE_PASSWORD = "not-a-secure-password"
        PENPOT_REDIS_URI         = "redis://127.0.0.1:6379/0"
        # Fixed typo: was PENPOT_TELEMERY_ENABLED, which Penpot ignores,
        # so telemetry was silently left at its default.
        PENPOT_TELEMETRY_ENABLED = "false"
        PENPOT_ASSETS_STORAGE_BACKEND      = "assets-fs"
        PENPOT_STORAGE_ASSETS_FS_DIRECTORY = "/opt/data/assets"
        PENPOT_SMTP_DEFAULT_FROM     = "no-reply+penpot-standalone@example.local"
        PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot-standalone@example.local"
        PENPOT_SMTP_HOST     = "127.0.0.1"
        PENPOT_SMTP_PORT     = "1025"
        PENPOT_SMTP_USERNAME = ""
        PENPOT_SMTP_PASSWORD = ""
        PENPOT_SMTP_TLS      = "false"
        PENPOT_SMTP_SSL      = "false"
      }
      resources {
        cpu        = 8000
        memory     = 1024
        memory_max = 2048
      }
    }
  }

  // penpot-standalone-exporter
  group "exporter" {
    network {
      mode = "bridge"
    }
    task "exporter" {
      driver = "docker"
      config {
        image = "penpotapp/exporter:2.0.1"
      }
      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_REDIS_URI  = "redis://127.0.0.1:6379/0" # via sidecar upstream
      }
    }
    service {
      name = "penpot-standalone-exporter"
      port = "6061"
      tags = ["traefik.enable=false"] # Hide envoy from traefik
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-standalone-redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }
  }

  // penpot-standalone-postgres
  group "postgres" {
    count = 1
    network {
      mode = "bridge"
      port "ingress" {
        to = 5432
      }
    }
    service {
      # Make available to other services by the 'penpot-standalone-postgres' name
      name = "penpot-standalone-postgres"
      port = "5432"
      tags = ["traefik.enable=false"] # Hide postgres from traefik
      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide postgres envoy from traefik
        }
      }
    }
    task "postgres" {
      driver = "docker"
      config {
        image = "postgres:16.1-alpine3.19"
        ports = ["ingress"]
      }
      env = {
        POSTGRES_USER="penpot",
        POSTGRES_PASSWORD="not-a-secure-password"
        POSTGRES_DB="penpot"
      }
    }
  }

  // penpot-standalone-redis-cache
  group "redis" {
    count = 1
    network {
      mode = "bridge"
      port "redis" {
        to = 6379
      }
    }
    service {
      # Make available to other services by the 'penpot-standalone-redis-cache' name
      name = "penpot-standalone-redis-cache"
      port = "6379"
      tags = ["traefik.enable=false"] # Hide redis from traefik
      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide redis envoy from traefik
        }
      }
    }
    task "redis" {
      driver = "docker"
      config {
        image = "redis:7.2.3-alpine"
        ports = ["redis"]
      }
    }
  }

  // penpot-standalone-fake-smtp
  group "mailcatcher" {
    count = 1
    network {
      mode = "bridge"
      port "webUI" {
        to = 1080
      }
    }
    service {
      # Make available to other services by the 'penpot-standalone-fake-smtp' name
      name = "penpot-standalone-fake-smtp"
      port = "1025"
      tags = ["traefik.enable=false"] # Hide mailcatcher from traefik
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide mailcatcher envoy from traefik
        }
      }
    }
    task "mailcatcher" {
      driver = "docker"
      config {
        image = "sj26/mailcatcher:latest"
        ports = ["webUI"]
      }
    }
  }
}

View File

@@ -0,0 +1,52 @@
# Minimal single-task redirect service: Traefik routes HOST here, and the
# container 301/302-redirects every request to TARGET.
locals {
  HOST = "example.local"              # Domain Traefik listens on for this router
  TARGET = "https://example.com/path" # Full destination URL for all redirects
}
job "domainredirect" {
  datacenters = ["dc1"]
  type = "service"
  group "domainredirect" {
    count = 1
    network {
      # Container listens on 8080; the host side is dynamically assigned.
      port "http" {
        to = 8080
      }
    }
    service {
      name = "domainredirect"
      provider = "consul"
      port = "http"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.domainredirect.tls=true",
        "traefik.http.routers.domainredirect.entrypoints=websecure",
        "traefik.http.routers.domainredirect.rule=Host(`${local.HOST}`)",
      ]
      check {
        type = "http"
        path = "/health"
        interval = "10s"
        timeout = "2s"
      }
    }
    task "domainredirect" {
      driver = "docker"
      config {
        image = "git.cbraaten.dev/caleb/domainredirect:latest"
        ports = ["http"]
      }
      env {
        # The image reads REDIRECT_TARGET to decide where to send traffic.
        REDIRECT_TARGET = local.TARGET
      }
    }
  }
}

View File

@@ -0,0 +1,18 @@
# domainredirect
domainredirect is just a redirect service that will redirect any traffic to the URL you specify. This is useful for redirecting traffic from one domain to another.
## Nomad Job for domainredirect
The domainredirect expects a REDIRECT_TARGET environment variable to be set. This is the complete url that the domainredirect will redirect to. You can set it on line 2 of the job spec.
## TODO
If you want to deploy this, you will need to update the domain name in the job spec.
| Line | Default | Adjustment |
| --- | --- | --- |
| 2 | `HOST = "example.local"` | Change `example.local` to the domain you are listening on |
| 3 | `TARGET = "https://example.com/path"` | Change `example.com/path` to your destination |
## Request
If you are deploying this regularly, please consider pulling the image and pushing it to your own registry. This will help reduce the load on my registry and help keep the image available for everyone. (Although it's not that complex, you could also [build the image yourself.](https://git.cbraaten.dev/Caleb/DomainRedirect))

View File

@@ -0,0 +1,3 @@
# Gitea act_runner (Docker-in-Docker, rootless variant) rebased to run as
# root: the stock rootless image failed to start the DinD daemon under this
# Nomad setup, so the user is switched back to root.
FROM gitea/act_runner:0.2.10-dind-rootless
USER root

View File

@@ -1,3 +1,9 @@
locals {
SUBDOMAIN = "git." // End with dot or leave blank for root domain
DOMAIN = "example.local"
TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
job "gitea" { job "gitea" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "service"
@@ -8,7 +14,7 @@ job "gitea" {
network { network {
mode = "bridge" mode = "bridge"
port "ingress" { port "ingress" {
to = 3000 static = 3000
} }
} }
@@ -38,7 +44,7 @@ job "gitea" {
"traefik.enable=true", "traefik.enable=true",
"traefik.http.routers.gitea.tls=true", "traefik.http.routers.gitea.tls=true",
"traefik.http.routers.gitea.entrypoints=websecure", "traefik.http.routers.gitea.entrypoints=websecure",
"traefik.http.routers.gitea.rule=Host(`git.example.local`)" "traefik.http.routers.gitea.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
] ]
check { check {
@@ -53,7 +59,7 @@ job "gitea" {
driver = "docker" driver = "docker"
config { config {
image = "gitea/gitea:1.21.1" image = "gitea/gitea:1.22.1"
ports = ["ingress"] ports = ["ingress"]
} }
@@ -61,6 +67,11 @@ job "gitea" {
volume = "gitea-data" volume = "gitea-data"
destination = "/data" destination = "/data"
} }
resources {
cpu = 3000
memory = 2000
}
} }
} }
} }

View File

@@ -7,20 +7,46 @@ Gitea should be configured to not utilize SSH as the job spec does not support i
You will need to modify the job spec items listed under [TODO](./readme.md#TODO) but there are no Gitea specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Gitea setup page to make the needed configuration changes. You will need to modify the job spec items listed under [TODO](./readme.md#TODO) but there are no Gitea specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Gitea setup page to make the needed configuration changes.
## Service Dependencies ## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#Storage%20and%20ZFS) - A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs)
- [Postgres](../postgres/readme.md) - [Postgres](../postgres/readme.md)
## TODO ## TODO
If you want to deploy this, you will need to verify you have a valid host volume and update the domain name in the job spec. If you want to deploy this, you will need to verify you have a valid host volume.
| Line | Default | Adjustment | | Line | Default | Adjustment |
| --- | --- | --- | | --- | --- | --- |
| 17 | `source = "gitea-data"` | Change `gitea-data` to a valid host volume name | | 17 | `source = "gitea-data"` | Change `gitea-data` to a valid host volume name |
| 46 | `"traefik.http.routers.caddy.rule=Host('git.example.com')"` | Change `git.example.com` to your domain name |
| 66 | `volume = "gitea-data"` | Change `gitea-data` to the host volume defined on line 15 if applicable | | 66 | `volume = "gitea-data"` | Change `gitea-data` to the host volume defined on line 15 if applicable |
> To make the instance accessible through TRAEFIK you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.
## Configuring Gitea ## Configuring Gitea
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Gitea. Postgres should be mounted to the container on the standard `5432` port so you can select postgres as the database type and use `127.0.0.1:5432` as the address and input the username, password, and database name you created for Gitea to use. There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Gitea. Postgres should be mounted to the container on the standard `5432` port so you can select postgres as the database type and use `127.0.0.1:5432` as the address and input the username, password, and database name you created for Gitea to use.
If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#Make%20a%20New%20Database). If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).
# Adding CI/CD
Gitea has a fork of act runner that can be used to run Github actions. In order to deploy this with Nomad, you will need to leverage Docker in Docker (DinD) with privileged mode enabled in Docker or pay for the business plan of Docker to have better app isolation. The default runner image provided by Gitea was failing to start the DinD Daemon so I included a dockerfile that you can use to specify that the container should be run as the root user.
1. Build Image
```bash
docker build --network host --platform linux/amd64 -t <your_gitea_domain>/caleb/nomad_act_runner:0.0.1 .
```
> [!NOTE]
> You may not need to specify the platform flag. If you use Apple Silicon but deploy to X86, you will need to include the flag.
2. Push Image
```bash
docker push <your_gitea_domain>/caleb/nomad_act_runner:0.0.1
```
3. Run the nomad job with the Gitea_Runner_Token
```bash
nomad job run -var "grt=<your_token>" -var "domain=<gitea_domain>" runner.nomad.hcl
```
> [!NOTE]
> If you prefer to not use cli variables, you can update the top of the Nomad Job Spec and manually put in the env variables.

View File

@@ -0,0 +1,71 @@
# Gitea Actions runner. Registers against the Gitea instance (reached via the
# Consul service mesh) using a registration token passed on the CLI:
#   nomad job run -var "grt=<token>" -var "domain=<gitea_domain>" runner.nomad.hcl
variable "grt" {
  type = string
  description = "Gitea runner token"
}
variable "domain" {
  type = string
  description = "Gitea Domain Name"
}
locals {
  GITEA_RUNNER_TOKEN = var.grt # Replace with raw token surrounded by quotes if you don't want to pass via cli or using web ui
  GITEA_DOMAIN = var.domain # Replace with domain surrounded by quotes if you don't want to pass via cli or using web ui
  # NOTE(review): Nomad HCL2 normally requires runtime variables to be escaped
  # as $${NOMAD_TASK_NAME}/$${NOMAD_ALLOC_INDEX}; a bare ${NOMAD_TASK_NAME}
  # here may fail to parse as an unknown variable — confirm this job plans.
  GITEA_RUNNER_NAME = "${NOMAD_TASK_NAME}-${NOMAD_ALLOC_INDEX}"
}
job "gitea-runner" {
  datacenters = ["dc1"]
  type = "service"
  group "application" {
    count = 1
    # Allow scaling the runner pool from the Nomad UI/API.
    scaling {
      enabled = true
      min = 1
      max = 5
    }
    network {
      mode = "bridge"
    }
    service {
      name = "gitea-runner"
      connect {
        sidecar_service {
          proxy {
            # Bind the gitea service locally so the runner can register
            # against http://127.0.0.1:3000 through the mesh.
            upstreams {
              destination_name = "gitea"
              local_bind_address = "127.0.0.1"
              local_bind_port = 3000
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }
    task "gitea-runner" {
      driver = "docker"
      config {
        image = "${local.GITEA_DOMAIN}/caleb/nomad_act_runner:0.0.1"
        # DinD needs a privileged container; requires allow_privileged = true
        # in the client's docker plugin config.
        privileged = true
      }
      env = {
        GITEA_INSTANCE_URL="http://${NOMAD_UPSTREAM_ADDR_gitea}"
        GITEA_RUNNER_REGISTRATION_TOKEN="${local.GITEA_RUNNER_TOKEN}"
        GITEA_RUNNER_NAME="${local.GITEA_RUNNER_NAME}"
      }
      resources {
        cpu = 2000
        memory = 2000
      }
    }
  }
}

View File

@@ -0,0 +1,89 @@
# Jellyfin media server. Uses host networking so DLNA/client discovery
# (UDP-style broadcast ports 1900/7359) work; HTTP is fronted by Traefik.
locals {
  SUBDOMAIN = "jellyfin." // End with dot or leave blank for root domain
  DOMAIN = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
job "jellyfin" {
  datacenters = ["dc1"]
  type = "service"
  group "application" {
    count = 1
    network {
      mode = "host"
      port "httpIngress" { static = 8096 }      # Jellyfin web/API
      port "serviceDiscovery" { static = 1900 } # DLNA discovery
      port "clientDiscovery" { static = 7359 }  # Jellyfin client auto-discovery
    }
    # Persistent host volumes — must exist in the client's host_volume config.
    volume "jellyfin-cache" {
      type = "host"
      source = "jellyfinCache"
    }
    volume "jellyfin-config" {
      type = "host"
      source = "jellyfinConfig"
    }
    volume "jellyfin-data" {
      type = "host"
      source = "media"
    }
    service {
      name = "jellyfin"
      port = "httpIngress"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.jellyfin.tls=true",
        "traefik.http.routers.jellyfin.entrypoints=websecure",
        "traefik.http.routers.jellyfin.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]
      check {
        type = "http"
        path = "/health"
        interval = "10s"
        timeout = "2s"
      }
    }
    task "jellyfin" {
      driver = "docker"
      config {
        image = "jellyfin/jellyfin:2024080505"
        ports = ["httpIngress", "serviceDiscovery", "clientDiscovery"]
      }
      env = {
        JELLYFIN_PublishedServerUrl="${local.TRAEFIK_DOMAIN}"
      }
      volume_mount {
        volume = "jellyfin-cache"
        destination = "/cache"
      }
      volume_mount {
        volume = "jellyfin-config"
        destination = "/config"
      }
      volume_mount {
        volume = "jellyfin-data"
        destination = "/media"
      }
      resources {
        cpu = 2000
        memory = 1024
        memory_max = 2048
      }
    }
  }
}

View File

@@ -0,0 +1,36 @@
# Jellyfin
Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media. It is an alternative to the proprietary Emby and Plex, to provide media from a dedicated server to end-user devices via multiple apps.
## Nomad Job for Gitea
You will need to modify the job spec items listed under [TODO](./readme.md#TODO) but there are no Jellyfin specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Jellyfin setup page to make the needed configuration changes such as defining the media libraries.
## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Cache
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Config
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Media
## TODO
If you want to deploy this, you will need to verify you have the necessary host volumes.
| Line | Default | Adjustment |
| ---- | --------------------------- | --------------------------------------------------------------------------- |
| 23 | `source = "jellyfinCache"` | Change `jellyfinCache` to a valid host volume name |
| 28 | `source = "jellyfinConfig"` | Change `jellyfinConfig` to a valid host volume name |
| 33 | `source = "media"` | Change `media` to a valid host volume name |
| 68 | `volume = "jellyfinCache"` | Change `jellyfinCache` to the host volume defined on line 21 if applicable |
| 74 | `volume = "jellyfinConfig"` | Change `jellyfinConfig` to the host volume defined on line 26 if applicable |
| 79 | `volume = "media"` | Change `media` to the host volume defined on line 31 if applicable |
> To make the instance accessible through TRAEFIK you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.
## Configuring Jellyfin
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Jellyfin.
> I recommend using a single root directory for media and then creating subdirectories for each type of media. This will make it easier to manage the media via SFTP and to configure Jellyfin.
> If this is deployed on Alpine Linux, you won't be able to pass through dedicated NVIDIA hardware because `nvidia-container-toolkit` is not available on MUSL. You will need to use a different root operating system like Ubuntu if you want hardware acceleration with NVIDIA hardware.

View File

@@ -0,0 +1,36 @@
# Mailcatcher: fake SMTP sink for dev/testing. SMTP (1025) is exposed to the
# mesh as 'fake-smtp'; the web UI (1080) gets a dynamic host port via Nomad.
job "mailcatcher" {
  datacenters = ["dc1"]
  group "mailcatcher" {
    count = 1
    network {
      mode = "bridge"
      port "webUI" {
        to = 1080
      }
    }
    service {
      # Make available to other services by the 'fake-smtp' name
      name = "fake-smtp"
      # Numeric port: SMTP is reachable only inside the bridge network / mesh.
      port = "1025"
      tags = ["traefik.enable=false"] # Hide mailcatcher from traefik
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide mailcatcher envoy from traefik
        }
      }
    }
    task "mailcatcher" {
      driver = "docker"
      config {
        image = "sj26/mailcatcher:latest"
        ports = ["webUI"]
      }
    }
  }
}

View File

@@ -0,0 +1,7 @@
# Mailcatcher
Mailcatcher is a simple SMTP server that catches all mail sent to it and displays it in a web interface. This is useful for development and testing of email sending services. This is not a production ready service and should not be used in a production environment.
## Nomad Job for Mailcatcher
Mailcatcher requires no configuration but is only available through the service mesh for sending SMTP mail. The service to connect to is fittingly called `fake-smtp` and is available to the localhost on port `1025`. There is no need for an email or password and the service is not encrypted so you should not use TLS or SSL. (It might work, I just haven't tried it.) - This is not something you should expose to the internet. It's just for testing and development.
Once you have sent an email to the fake SMTP server, you can view the email in the web interface. The web interface has a dynamic port assigned by Nomad so you will need to go to the Task Allocation page in the Nomad UI where you can open the link under 'WebUI' to view the emails.

View File

@@ -0,0 +1,59 @@
# Single-instance Minio (no tiering). S3 API (9000) is mesh-only as
# 'minio-singleton'; the admin console (9090) gets a dynamic host port.
job "minio-singleton" {
  datacenters = ["dc1"]
  type = "service"
  group "minio" {
    count = 1
    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }
    service {
      # Make available to other services by the 'minio-singleton' name
      name = "minio-singleton"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio from traefik
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio from traefik
        }
      }
    }
    # Persists bucket data across restarts; must match a client host_volume.
    volume "minio-data" {
      type = "host"
      source = "minio"
    }
    task "minio" {
      driver = "docker"
      volume_mount {
        volume = "minio-data"
        destination = "/data"
      }
      config {
        image = "quay.io/minio/minio"
        ports = ["console"]
        command = "server"
        args = ["/data", "--console-address", ":9090"]
      }
      resources {
        cpu = 100
        memory = 2000
      }
      env {
        # op:// references are 1Password secret pointers — render real values
        # with `op inject` before running (see repo README on managing secrets).
        MINIO_ROOT_USER="op://InfraSecrets/Minio-Singleton/username"
        MINIO_ROOT_PASSWORD="op://InfraSecrets/Minio-Singleton/password"
      }
    }
  }
}

View File

@@ -0,0 +1,143 @@
# The use of Minio in this stack is not architected for high availability or
# data integrity and as such, is not recommended for production use. Instead,
# this is for making an s3 compatible storage available to the service mesh
# and ZFS is relied upon for data integrity within a single node storage pool.
# For a production ready Minio deployment, please start with the following:
# https://min.io/docs/minio/kubernetes/upstream/operations/concepts/architecture.html
# Note: This configures TWO minio instances, one for "HOT" storage made up of
# SSDs and a "WARM" instance with HDDs instead. Manual configuration of tiers
# is required to make use of this feature. TODO: Automate this.
job "minio" {
  datacenters = ["dc1"]
  type = "service"
  # Tier 1 ("HOT", SSD-backed) instance — exposed to the mesh as 'minio'.
  group "minio" {
    count = 1
    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }
    service {
      # Make available to other services by the 'minio' name
      name = "minio"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio from traefik
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio from traefik
        }
      }
    }
    # Extra sidecar-only service so the tier-1 instance can reach the tier-2
    # backend ('minio-backend') at 127.0.0.1:9001 for tiering.
    service {
      name = "minio-backend-envoy"
      tags = ["traefik.enable=false"] # Hide minio-backend from traefik
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "minio-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port = 9001
            }
          }
          tags = ["traefik.enable=false"] # Hide minio-backend from traefik
        }
      }
    }
    volume "minio-data" {
      type = "host"
      source = "minio-ssd" # Tier 1 Storage Host Volume
    }
    task "minio" {
      driver = "docker"
      volume_mount {
        volume = "minio-data"
        destination = "/data"
      }
      config {
        image = "quay.io/minio/minio"
        ports = ["console"]
        command = "server"
        args = ["/data", "--console-address", ":9090"]
      }
      resources {
        cpu = 100
        memory = 2000
      }
      env {
        # op:// references are 1Password secret pointers — render with
        # `op inject` before running.
        MINIO_ROOT_USER="op://InfraSecrets/Minio Tier 1/username"
        MINIO_ROOT_PASSWORD="op://InfraSecrets/Minio Tier 1/password"
      }
    }
  }
  # Tier 2 ("WARM", HDD-backed) instance — mesh-only as 'minio-backend'.
  group "minio-hdd" {
    count = 1
    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }
    service {
      name = "minio-backend"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio-backend from traefik
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio-backend from traefik
        }
      }
    }
    volume "minio-warm-data" {
      type = "host"
      source = "minio-hdd" # Tier 2 Storage Host Volume
    }
    task "minio-hdd" {
      driver = "docker"
      volume_mount {
        volume = "minio-warm-data"
        destination = "/data"
      }
      config {
        image = "quay.io/minio/minio"
        ports = ["console"]
        command = "server"
        args = ["/data", "--console-address", ":9090"]
      }
      resources {
        cpu = 100
        memory = 2000
      }
      env {
        MINIO_ROOT_USER="op://InfraSecrets/Minio Tier 2/username"
        MINIO_ROOT_PASSWORD="op://InfraSecrets/Minio Tier 2/password"
      }
    }
  }
}

View File

@@ -0,0 +1,48 @@
# Minio
Minio is an open source object storage server that is compatible with Amazon S3. You can use it to store and retrieve data from any application that requires s3 storage. You can configure storage tiers and lifecycle policies to manage your data with things like retention and expiration or movements between storage classes.
The use of Minio in this stack is not architected for high availability or data integrity and as such, is not recommended for production use of any reasonably large scale. Instead, this is for making an s3 compatible storage available to the service mesh and ZFS is relied upon for data integrity within a single node storage pool.
For a production ready Minio deployment, please start with the following:
https://min.io/docs/minio/kubernetes/upstream/operations/concepts/architecture.html
## Nomad Job for Minio
Nomad requires a Host Volume to persist data across restarts. This will limit the portability of the running instance but it is simple to configure. If you want to have dynamic storage, you will need to modify the job spec to use a different storage driver such as [Ceph](https://docs.ceph.com/en/latest/start/intro/) or [Seaweedfs](https://github.com/seaweedfs/seaweedfs/wiki). Both provide object storage that is S3 compatible so if you deploy those, you may not have a need for Minio but the admin interface and features of Minio may still meet your needs better.
### Minio-Singleton (minio-singleton.nomad.hcl)
This job is for a single instance of Minio with no tiering. It is the simplest configuration and is suitable for a small amount of data. It is great for getting started with your own S3 compatible storage.
#### Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs)
#### TODO
If you want to deploy this, you will need to verify you have a valid host volume and set the initial root credentials.
| Line | Default | Adjustment |
| --- | --- | --- |
| 30 | `source = "minio"` | Change `minio` to a valid host volume name if applicable |
| 37 | `volume = "minio-data"` | Change `minio-data` to the host volume defined on line 28 if applicable |
| 54 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio-Singleton/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 55 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio-Singleton/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
### Minio-Tiered (minio.nomad.hcl)
This job is for an instance of Minio with tiering. If your host has SSDs and HDDs, you can configure Minio to use the SSDs for "HOT" storage and the HDDs for "WARM" storage. This is useful for managing data that is accessed frequently and data that is accessed infrequently.
> IMPORTANT: Tiering is not a backup solution! You should still have a backup strategy for your data. Best to make backups regularly and follow at least the 3-2-1 rule: 3 copies of your data, 2 on different media, 1 offsite.
#### Service Dependencies
- Two Valid [Host Volumes](../../../host_init/README.md#storage-and-zfs)
#### TODO
If you want to deploy this, you will need to verify you have a valid host volume and set the initial root credentials.
| Line | Default | Adjustment |
| --- | --- | --- |
| 59 | `source = "minio-ssd"` | Change `minio-ssd` to a valid host volume name if applicable |
| 66 | `volume = "minio-data"` | Change `minio-data` to the host volume defined on line 57 if applicable |
| 83 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio Tier 1/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 84 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio Tier 1/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 113 | `source = "minio-hdd"` | Change `minio-hdd` to a valid host volume name if applicable |
| 121 | `volume = "minio-warm-data"` | Change `minio-warm-data` to the host volume defined on line 111 if applicable |
| 138 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio Tier 2/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 139 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio Tier 2/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |

View File

@@ -0,0 +1,241 @@
# Listening Domain
locals {
  SUBDOMAIN = "penpot." // End with dot or leave blank for root domain
  DOMAIN = "example.local"
  # Full hostname registered with traefik, e.g. "penpot.example.local".
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
# Application routing environment variables
locals {
  PENPOT_PUBLIC_URI = "https://${local.TRAEFIK_DOMAIN}"
  # Backend/exporter are reached via the frontend sidecar's local binds
  # (ports 6060/6061 — see the frontend group's upstreams below).
  PENPOT_BACKEND_URI = "http://127.0.0.1:6060"
  PENPOT_EXPORTER_URI = "http://127.0.0.1:6061"
  PENPOT_FLAGS = "enable-smtp enable-registration enable-login-with-password enable-demo-users"
  PENPOT_SECRET_KEY = "op://InfraSecrets/7hbsxng22unjqc4wkj62qniu2u/credential" # Try running `openssl rand -hex 32` to generate a random secret key
  # Postgres/redis are reached via the backend sidecar's local binds.
  PENPOT_DATABASE_URI = "postgresql://127.0.0.1:5432/penpot"
  PENPOT_DATABASE_USERNAME = "op://InfraSecrets/Postgres - Penpot User/username"
  PENPOT_DATABASE_PASSWORD = "op://InfraSecrets/Postgres - Penpot User/password"
  PENPOT_REDIS_URI = "redis://127.0.0.1:6379/0"
  # NOTE(review): local name is misspelled ("TELEMERY"); it is referenced by
  # that spelling in the job below, so renaming it requires updating both.
  PENPOT_TELEMERY_ENABLED = "false"
}
# Assets storage environment variables (fs or s3)
locals {
  # Uncomment to store assets on the local filesystem instead of S3.
  // PENPOT_ASSETS_STORAGE_BACKEND = "assets-fs"
  PENPOT_STORAGE_ASSETS_FS_DIRECTORY = "/opt/data/assets"
  PENPOT_ASSETS_STORAGE_BACKEND = "assets-s3"
  AWS_ACCESS_KEY_ID = "op://InfraSecrets/Penpot S3 Key/username"
  AWS_SECRET_ACCESS_KEY = "op://InfraSecrets/Penpot S3 Key/credential"
  # Minio is reached via the sidecar's local bind on port 9000.
  PENPOT_STORAGE_ASSETS_S3_ENDPOINT = "http://127.0.0.1:9000"
  PENPOT_STORAGE_ASSETS_S3_BUCKET = "penpot"
}
# SMTP environment variables
locals {
  PENPOT_SMTP_DEFAULT_FROM = "no-reply+penpot@${local.DOMAIN}"
  PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot@${local.DOMAIN}"
  # SMTP is reached via the backend sidecar's 'fake-smtp' local bind (1025).
  PENPOT_SMTP_HOST = "127.0.0.1"
  PENPOT_SMTP_PORT = "1025"
  PENPOT_SMTP_USERNAME = ""
  PENPOT_SMTP_PASSWORD = ""
  PENPOT_SMTP_TLS = "false"
  PENPOT_SMTP_SSL = "false"
}
job "penpot" {
  datacenters = ["dc1"]

  # Web UI; the only group exposed through traefik.
  group "frontend" {
    count = 1
    network {
      mode = "bridge"
      port "ingress" {
        to = 80
      }
    }
    # Expose frontend to internet through traefik
    service {
      name = "penpot"
      port = "ingress"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot.tls=true",
        "traefik.http.routers.penpot.entrypoints=websecure",
        "traefik.http.routers.penpot.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
      ]
      connect {
        sidecar_service {
          proxy {
            # Local binds matching PENPOT_BACKEND_URI / PENPOT_EXPORTER_URI
            # and the S3 endpoint in the locals above.
            upstreams {
              destination_name = "penpot-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port = 6060
            }
            upstreams {
              destination_name = "penpot-exporter"
              local_bind_address = "127.0.0.1"
              local_bind_port = 6061
            }
            upstreams {
              destination_name = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port = 9000
            }
          }
          tags = ["traefik.enable=false"] # Hide service from traefik
        }
      }
      check {
        type = "http"
        path = "/"
        interval = "10s"
        timeout = "2s"
      }
    }
    task "frontend" {
      driver = "docker"
      config {
        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }
      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_BACKEND_URI = local.PENPOT_BACKEND_URI
        PENPOT_EXPORTER_URI = local.PENPOT_EXPORTER_URI
        PENPOT_FLAGS = local.PENPOT_FLAGS
      }
    }
  }

  # API backend; mesh-only, reached via the frontend sidecar on 6060.
  group "backend" {
    network {
      mode = "bridge"
    }
    service {
      # Make available to other services by the 'penpot-backend' name
      name = "penpot-backend"
      port = "6060"
      tags = ["traefik.enable=false"] # Hide penpot-backend from traefik
      # Make available through the consul service mesh
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port = 5432
            }
            upstreams {
              destination_name = "redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port = 6379
            }
            upstreams {
              destination_name = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port = 9000
            }
            upstreams {
              destination_name = "fake-smtp"
              local_bind_address = "127.0.0.1"
              local_bind_port = 1025
            }
          }
          tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
        }
      }
    }
    task "backend" {
      driver = "docker"
      config {
        image = "penpotapp/backend:2.0.1"
        # FIX: removed `ports = ["ingress"]` — this group's network defines
        # no port labels ("ingress" only exists in the frontend group), so
        # the label reference was invalid; the backend is mesh-only.
      }
      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_SECRET_KEY = local.PENPOT_SECRET_KEY
        PENPOT_DATABASE_URI = local.PENPOT_DATABASE_URI
        PENPOT_DATABASE_USERNAME = local.PENPOT_DATABASE_USERNAME
        PENPOT_DATABASE_PASSWORD = local.PENPOT_DATABASE_PASSWORD
        PENPOT_REDIS_URI = local.PENPOT_REDIS_URI
        PENPOT_FLAGS = local.PENPOT_FLAGS
        # FIX: env var key was misspelled PENPOT_TELEMERY_ENABLED — Penpot
        # reads PENPOT_TELEMETRY_ENABLED, so telemetry was never disabled.
        # (The local it reads from keeps its original, misspelled name.)
        PENPOT_TELEMETRY_ENABLED = local.PENPOT_TELEMERY_ENABLED
        PENPOT_ASSETS_STORAGE_BACKEND = local.PENPOT_ASSETS_STORAGE_BACKEND
        PENPOT_STORAGE_ASSETS_FS_DIRECTORY = local.PENPOT_STORAGE_ASSETS_FS_DIRECTORY
        AWS_ACCESS_KEY_ID = local.AWS_ACCESS_KEY_ID
        AWS_SECRET_ACCESS_KEY = local.AWS_SECRET_ACCESS_KEY
        PENPOT_STORAGE_ASSETS_S3_ENDPOINT = local.PENPOT_STORAGE_ASSETS_S3_ENDPOINT
        PENPOT_STORAGE_ASSETS_S3_BUCKET = local.PENPOT_STORAGE_ASSETS_S3_BUCKET
        PENPOT_SMTP_DEFAULT_FROM = local.PENPOT_SMTP_DEFAULT_FROM
        PENPOT_SMTP_DEFAULT_REPLY_TO = local.PENPOT_SMTP_DEFAULT_REPLY_TO
        PENPOT_SMTP_HOST = local.PENPOT_SMTP_HOST
        PENPOT_SMTP_PORT = local.PENPOT_SMTP_PORT
        PENPOT_SMTP_USERNAME = local.PENPOT_SMTP_USERNAME
        PENPOT_SMTP_PASSWORD = local.PENPOT_SMTP_PASSWORD
        PENPOT_SMTP_TLS = local.PENPOT_SMTP_TLS
        PENPOT_SMTP_SSL = local.PENPOT_SMTP_SSL
      }
      resources {
        cpu = 8000
        memory = 1024
        memory_max = 2048
      }
    }
  }

  # Export/rendering worker; mesh-only, reached on 6061.
  group "exporter" {
    network {
      mode = "bridge"
    }
    task "exporter" {
      driver = "docker"
      config {
        image = "penpotapp/exporter:2.0.1"
      }
      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_REDIS_URI = local.PENPOT_REDIS_URI
      }
    }
    service {
      name = "penpot-exporter"
      port = "6061"
      tags = ["traefik.enable=false"] # Hide envoy from traefik
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port = 6379
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }
  }
}

View File

@@ -0,0 +1,21 @@
# Penpot
Penpot is the Open-Source Design & Prototyping Tool for Product Teams. It is a great alternative to Figma or Adobe XD that you can host yourself. Learn all about it on their [website](https://penpot.app/).
## Nomad Job for Penpot
Penpot already hosts documentation related to their architecture and how to deploy it; therefore, this will not duplicate documentation that can be found on their website such as available flags that can be applied. The nomad spec available in this repository defines the frontend, backend, and exporter services. It is expected that you already have the service dependencies running and available to the Penpot service as well as valid postgres credentials.
If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).
## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) or an S3 storage provider like [Minio](../minio/readme.md)
- [Postgres](../postgres/readme.md)
- [Redis Cache](../redis/readme.md)
- *optional* - An SMTP server like [mailcatcher](../mailcatcher/readme.md) or a legitimate mail server
## Configuring Penpot
Provided you do not need to make adjustments to the services exposed via the consul sidecar, the only edits needed are lines 1-42. The 'locals' blocks are referenced in other sections of the spec and as such we can do all of our configuration in one place. Some notable items are as follows:
- TRAEFIK_DOMAIN - The domain you will be registering with traefik for access to the application
- PENPOT_FLAGS - remove 'enable-smtp' if you are not going to give SMTP configuration
- PENPOT_ASSETS_STORAGE_BACKEND - The configurations are given for both file system and minio storage. It doesn't affect anything if you include environment variables for both but you must make sure that you have the storage backend set to either 'assets-fs' or 'assets-s3' to inform Penpot which you intend to use. If you are using a filesystem and wish to persist data, you will need to make sure that your host volumes are properly configured.

View File

@@ -7,7 +7,7 @@ Nomad requires a Host Volume to persist data across restarts. This will limit th
Postgres will have a default user which is a good one to use for making application specific users and databases. These are defined through environment variables in the nomad job spec so you only need to edit the job spec to meet your requirements. If you run it, it will register with consul but be hidden to Traefik meaning you can only access it through the service mesh. Postgres will have a default user which is a good one to use for making application specific users and databases. These are defined through environment variables in the nomad job spec so you only need to edit the job spec to meet your requirements. If you run it, it will register with consul but be hidden to Traefik meaning you can only access it through the service mesh.
## Service Dependencies ## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#Storage%20and%20ZFS) - A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs)
## TODO ## TODO
If you want to deploy this, you will need to verify you have a valid host volume and set the initial postgres root credentials. If you want to deploy this, you will need to verify you have a valid host volume and set the initial postgres root credentials.
@@ -16,8 +16,8 @@ If you want to deploy this, you will need to verify you have a valid host volume
| --- | --- | --- | | --- | --- | --- |
| 17 | `source = "postgres"` | Change `postgres` to a valid host volume name | | 17 | `source = "postgres"` | Change `postgres` to a valid host volume name |
| 38 | `volume = "postgres-data"` | Change `postgres-data` to the host volume defined on line 15 if applicable | | 38 | `volume = "postgres-data"` | Change `postgres-data` to the host volume defined on line 15 if applicable |
| 48 | `"POSTGRES_USER"="op://InfraSecrets/Postgres Root/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../REAMDE.md#Managing_Secrets) for more information | | 48 | `"POSTGRES_USER"="op://InfraSecrets/Postgres Root/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 49 | `"POSTGRES_PASSWORD"="op://InfraSecrets/Postgres Root/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../REAMDE.md#Managing_Secrets) for more information | | 49 | `"POSTGRES_PASSWORD"="op://InfraSecrets/Postgres Root/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
## Make a New Database ## Make a New Database

View File

@@ -0,0 +1,90 @@
# Listening Domain
locals {
  # FIX: SUBDOMAIN must end with a dot (as the comment requires) — the
  # previous value "umami" produced TRAEFIK_DOMAIN = "umamiexample.com".
  SUBDOMAIN = "umami." // End with dot or leave blank for root domain
  DOMAIN = "example.com"
  # Full hostname registered with traefik, e.g. "umami.example.com".
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}
// OP is 1Password for CLI
// Secret references resolved at deploy time; see the repo README
// ("Managing Secrets") before changing these paths.
locals {
  OP_DB_USER = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresUsername"
  OP_DB_PASSWORD = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresPassword"
  OP_AppSecret = "op://InfraSecrets/Umami/ENV_SECRETS/AppSecret"
}
locals {
  # "user:password" credential pair embedded in the connection URL below.
  USER_PASSWORD = "${local.OP_DB_USER}:${local.OP_DB_PASSWORD}"
  UMAMI_APPSECRET = "${local.OP_AppSecret}"
  # Postgres is reached via the sidecar's local bind on 5432 (see job below).
  UMAMI_DB_URL = "postgresql://${local.USER_PASSWORD}@127.0.0.1:5432/umami"
  UMAMI_DB_TYPE = "postgresql"
}
job "umami" {
  datacenters = ["dc1"]
  type = "service"

  group "application" {
    count = 1
    network {
      mode = "bridge"
      # Umami listens on 3000 inside the container.
      port "httpIngress" {
        to = 3000
      }
    }
    service {
      name = "umami"
      port = "httpIngress"
      connect {
        sidecar_service {
          proxy {
            # Bind the mesh 'postgres' service locally for UMAMI_DB_URL.
            upstreams {
              destination_name = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port = 5432
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
      # Expose to the internet through traefik at TRAEFIK_DOMAIN.
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.umami.tls=true",
        "traefik.http.routers.umami.entrypoints=websecure",
        "traefik.http.routers.umami.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]
      check {
        type = "http"
        path = "/api/heartbeat"
        interval = "10s"
        timeout = "2s"
      }
    }
    task "umami" {
      driver = "docker"
      config {
        image = "ghcr.io/umami-software/umami:postgresql-latest"
        ports = ["httpIngress"]
      }
      env = {
        DATABASE_URL = "${local.UMAMI_DB_URL}"
        DATABASE_TYPE = "${local.UMAMI_DB_TYPE}"
        # FIX: was `APP_SECRET:"..."` — JSON-style colon, inconsistent with
        # the sibling entries; use `=` like the rest of the map.
        APP_SECRET = "${local.UMAMI_APPSECRET}"
      }
      resources {
        cpu = 1000
        memory = 512
        memory_max = 1024
      }
    }
  }
}

View File

@@ -5,7 +5,7 @@
- name: Update consul config - name: Update consul config
ansible.builtin.copy: ansible.builtin.copy:
mode: preserve mode: preserve
src: ./host_config/consul.hcl src: ../host_config/consul.hcl
dest: /etc/consul/server.hcl dest: /etc/consul/server.hcl
- name: Restart consul service - name: Restart consul service

View File

@@ -5,7 +5,7 @@
- name: Update nomad config - name: Update nomad config
ansible.builtin.copy: ansible.builtin.copy:
mode: preserve mode: preserve
src: ./host_config/nomad.hcl src: ../host_config/nomad.hcl
dest: /etc/nomad.d/server.hcl dest: /etc/nomad.d/server.hcl
- name: Restart nomad service - name: Restart nomad service

View File

@@ -5,7 +5,7 @@
- name: Update traefik config - name: Update traefik config
ansible.builtin.copy: ansible.builtin.copy:
mode: preserve mode: preserve
src: ./host_config/traefik.yml src: ../host_config/traefik.yml
dest: /etc/traefik/traefik.yaml # Alpine default config is yaml dest: /etc/traefik/traefik.yaml # Alpine default config is yaml
- name: Restart traefik service - name: Restart traefik service