Compare commits
36 Commits (41579e2771...main)

| SHA1 |
| --- |
| 696813fc72 |
| d099c4dcc2 |
| 177d7b480c |
| 641beb1f81 |
| 0e5712a05e |
| 318c8e5ae1 |
| 75fcfb90bf |
| 41c1543979 |
| 8663b87f0f |
| d4e2b4a241 |
| 695acaa48a |
| 71c46ad953 |
| fdacbec5ea |
| 16bb096e34 |
| 49a25fff41 |
| 829bb876e0 |
| 01337c1e84 |
| fca39e54b3 |
| 9c64bb856f |
| 927d57e9c9 |
| 149fd307f2 |
| b686486c93 |
| 8428e1a269 |
| 7509ca056e |
| 6926e74b8a |
| 0197e47292 |
| d0bc4a17d4 |
| 83529ef8ef |
| fd42e1f6b0 |
| 1b9c244cb0 |
| ffe8c8fecd |
| ad8fd86bcb |
| 77f6639929 |
| 94b8e0694c |
| c5346ef2ed |
| ec9f503d57 |
.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
.DS_Store
*.secret
@@ -50,5 +50,3 @@ Be aware, that if you have an error it could cause the service to fail to start

### Nomad Jobs
This is where the nomad job specs are stored. You can learn more about the job specs in the [nomad_jobs readme](./nomad_jobs/README.md).
@@ -30,7 +30,7 @@ client {
    path      = "/hdd/gitea/"
    read_only = false
  }

  host_volume "minio-ssd" {
    path      = "/ssd/minio/"
    read_only = false

@@ -50,9 +50,33 @@ client {
    path      = "/ssd/sqlite/"
    read_only = false
  }

  host_volume "jellyfinCache" {
    path      = "/hdd/multimedia/cache/"
    read_only = false
  }

  host_volume "jellyfinConfig" {
    path      = "/hdd/multimedia/config/"
    read_only = false
  }

  host_volume "media" {
    path      = "/hdd/multimedia/media/"
    read_only = false
  }
}

plugin "docker" {
  config {
    allow_privileged = true
    volumes {
      enabled = true
    }
  }
}

ui {
  # Comment to disable UI, it listens on port 4646
  enabled = true
}
}
@@ -5,8 +5,8 @@
 - name: Enable community packages
   ansible.builtin.lineinfile:
     path: /etc/apk/repositories
-    regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-    line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+    regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+    line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
     state: present

 - name: Update apk packages
@@ -5,8 +5,8 @@
 - name: Enable community packages
   ansible.builtin.lineinfile:
     path: /etc/apk/repositories
-    regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-    line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+    regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+    line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
     state: present

 - name: Update apk packages
@@ -5,8 +5,8 @@
 - name: Enable community packages
   ansible.builtin.lineinfile:
     path: /etc/apk/repositories
-    regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-    line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+    regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+    line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
     state: present

 - name: Update apk packages
@@ -1,12 +1,12 @@
-- name: Install Caddy on Alpine Linux
+- name: Install Traefik on Alpine Linux
   hosts: all

   tasks:
     - name: Enable community packages
       ansible.builtin.lineinfile:
         path: /etc/apk/repositories
-        regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
-        line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
+        regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
+        line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
         state: present

     - name: Update apk packages
@@ -1,5 +1,3 @@
-// Write me a cheatsheet for ZFS
-
 # ZFS Cheatsheet

 | Command | Description |
nomad_jobs/README.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# Nomad Job Specs

This directory contains two sub-directories: `apps` and `services`. If you are going to ignore this README, you should at least read how this repository [manages secrets](#managing-secrets).

## Apps Directory

The `apps` directory includes the Nomad job specifications for deployable applications along with their service dependencies. In this directory, you can find the Nomad job spec for the Penpot application, which includes all the necessary services required for its deployment.

These job specs are quite large and tough to reason about, so it is recommended that you use the `services` directory to deploy applications after having deployed their dependencies; the `apps` specs are simply a good way to just get an app up and running. You will still need to investigate the job specs to make sure they meet your requirements, such as having the proper host volumes available. (By default, host volumes are not used, meaning data will not persist across restarts.)

**WARNING**: The orchestrator could restart your service at any time. If you do not have a host volume, you will lose all your data.

## Services Directory

The `services` directory contains standalone services that can be deployed without embedding dependencies in the job specifications. These are much smaller specs and easier to update, but the administrator needs to ensure that the necessary services are deployed in advance, such as Postgres being available before deploying Gitea. These dependencies are documented in each service readme.
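If you go this route, deployment is just a couple of `nomad job run` calls, dependencies first. A minimal sketch, assuming you are deploying Gitea from this directory:

```bash
# Dependencies first, then the service itself (paths follow this repo's layout).
nomad job run services/postgres/postgres.nomad.hcl
nomad job run services/gitea/gitea.nomad.hcl
```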
## Managing Secrets

Many of the nomad jobs require secrets to be placed in the job spec. While you could integrate with a secrets provider like [Hashicorp Vault](https://www.vaultproject.io/), that is an additional service to manage and maintain. I definitely encourage you to take a look, as it provides a lot of value such as secret rotation and auditing.

This repo uses [1password secret references](https://developer.1password.com/docs/cli/secret-references) for anything you would need to set upon deployment, such as credentials or cryptographic strings. This lets you easily see which fields you may need to set and provides a secure way to manage all the secrets you need to deploy your applications without risking them being added to version control by mistake.

If you choose to use [1password](https://1password.com/), you will need to install the [1password cli](https://support.1password.com/command-line-getting-started/) and log in to your account. You can then use the `op` command to retrieve secrets from their respective vault and create an output file with the secrets injected.

The [1password cli](https://developer.1password.com/docs/cli/) retrieves secrets and injects them into the job spec through the `op inject` command, documented [here](https://developer.1password.com/docs/cli/secrets-config-files#step-2-inject-the-secrets).

```bash
op inject -i postgres.nomad.hcl -o postgres.nomad.hcl.secret
```
> Anything ending in `.secret` is ignored by git, so you can safely output the secrets into the job spec without worrying about them being committed to version control.
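Putting it together, a minimal sketch of the full workflow (the injected file name is just an example):

```bash
op signin                                          # authenticate the CLI first; the exact flow depends on your op version
op inject -i postgres.nomad.hcl -o postgres.nomad.hcl.secret
nomad job run postgres.nomad.hcl.secret            # .secret files are git-ignored
```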
## Available Services

| Service | Description (by LLM) | Service Readme | App Spec |
| --- | --- | --- | --- |
| Caddy | Caddy is a web server and reverse proxy with automatic HTTPS written in Go. | [Service Readme](./services/caddy/readme.md) | |
| Gitea | Gitea is a self-hosted Git service written in Go. | [Service Readme](./services/gitea/readme.md) | [App Spec](./apps/gitea-standalone.nomad.hcl) |
| Minio | MinIO is a high performance object storage server compatible with Amazon S3 APIs. | [Service Readme](./services/minio/readme.md) | |
| Penpot | Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Non dependent on operating systems, Penpot is web based and works with open web standards (SVG). For all and empowered by the community. | [Service Readme](./services/penpot/readme.md) | [App Spec](./apps/penpot-standalone.nomad.hcl) |
| Postgres | PostgreSQL is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. | [Service Readme](./services/postgres/readme.md) | |
| Redis | Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker. | [Service Readme](./services/redis/readme.md) | |
nomad_jobs/apps/gitea-standalone.nomad.hcl (new file, 110 lines)
@@ -0,0 +1,110 @@
# Deploy Gitea with dependencies encapsulated in the nomad job spec. This spec
# will not persist data between restarts. Good for getting started.

# WARNING: Set a secure password for the postgres user. Line 38
# WARNING: Update the domain gitea should be deployed to on traefik. Line 90

job "gitea-standalone" {
  datacenters = ["dc1"]

  group "database" {
    count = 1

    network {
      mode = "bridge"
    }

    service {
      name = "gitea-postgres-standalone"
      port = "5432"
      tags = ["traefik.enable=false"] # Hide postgres from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide postgres envoy from traefik
        }
      }
    }

    task "postgres" {
      driver = "docker"

      config {
        image = "postgres:16.1-alpine3.19"
      }

      env = {
        "POSTGRES_USER"     = "gitea"
        "POSTGRES_PASSWORD" = "not-a-secure-password"
        "POSTGRES_DB"       = "gitea"
      }
    }
  }

  group "frontend" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        to = 3000
      }
    }

    # Attach to Postgres Instance
    service {
      name = "postgres-gitea-standalone-envoy"
      port = "ingress"
      tags = ["traefik.enable=false"] # Hide envoy from traefik

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "gitea-postgres-standalone"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    # Expose to Traefik as a service
    service {
      name = "gitea-standalone"
      port = "ingress"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.gitea-standalone.tls=true",
        "traefik.http.routers.gitea-standalone.entrypoints=websecure",
        "traefik.http.routers.gitea-standalone.rule=Host(`git.example.local`)"
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "gitea-standalone" {
      driver = "docker"

      config {
        image = "gitea/gitea:1.21.1"
        ports = ["ingress"]
      }
    }
  }
}
nomad_jobs/apps/penpot-standalone.nomad.hcl (new file, 306 lines)
@@ -0,0 +1,306 @@
locals {
  TRAEFIK_DOMAIN    = "penpot.example.local"
  PENPOT_PUBLIC_URI = "https://${local.TRAEFIK_DOMAIN}"
  PENPOT_SECRET_KEY = "op://InfraSecrets/7hbsxng22unjqc4wkj62qniu2u/credential" # Try running `openssl rand -hex 32` to generate a random secret key
  PENPOT_FLAGS      = "enable-demo-users"
}

job "penpot-standalone" {
  datacenters = ["dc1"]

  group "frontend" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        to = 80
      }
    }

    # Expose frontend to internet through traefik
    service {
      name = "penpot-standalone"
      port = "ingress"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot-standalone.tls=true",
        "traefik.http.routers.penpot-standalone.entrypoints=websecure",
        "traefik.http.routers.penpot-standalone.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
      ]

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-standalone-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6060
            }
            upstreams {
              destination_name   = "penpot-standalone-exporter"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6061
            }
            upstreams {
              destination_name   = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 9000
            }
          }
          tags = ["traefik.enable=false"] # Hide service from traefik
        }
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "frontend" {
      driver = "docker"

      config {
        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }

      env {
        PENPOT_PUBLIC_URI   = local.PENPOT_PUBLIC_URI
        PENPOT_BACKEND_URI  = "http://127.0.0.1:6060"
        PENPOT_EXPORTER_URI = "http://127.0.0.1:6061"

        PENPOT_FLAGS = local.PENPOT_FLAGS
      }
    }
  }

  // penpot-standalone-backend
  group "backend" {

    network {
      mode = "bridge"
    }

    service {
      # Make available to other services by the 'penpot-backend' name
      name = "penpot-standalone-backend"
      port = "6060"
      tags = ["traefik.enable=false"] # Hide backend from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-standalone-postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
            upstreams {
              destination_name   = "penpot-standalone-redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
            upstreams {
              destination_name   = "penpot-standalone-fake-smtp"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 1025
            }
          }
          tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
        }
      }
    }

    task "backend" {
      driver = "docker"
      config {
        image = "penpotapp/backend:2.0.1"
        ports = ["ingress"]
      }

      env {
        PENPOT_PUBLIC_URI        = local.PENPOT_PUBLIC_URI
        PENPOT_SECRET_KEY        = local.PENPOT_SECRET_KEY
        PENPOT_FLAGS             = local.PENPOT_FLAGS
        PENPOT_DATABASE_URI      = "postgresql://127.0.0.1:5432/penpot"
        PENPOT_DATABASE_USERNAME = "penpot"
        PENPOT_DATABASE_PASSWORD = "not-a-secure-password"
        PENPOT_REDIS_URI         = "redis://127.0.0.1:6379/0"
        PENPOT_TELEMETRY_ENABLED = "false"

        PENPOT_ASSETS_STORAGE_BACKEND      = "assets-fs"
        PENPOT_STORAGE_ASSETS_FS_DIRECTORY = "/opt/data/assets"

        PENPOT_SMTP_DEFAULT_FROM     = "no-reply+penpot-standalone@example.local"
        PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot-standalone@example.local"
        PENPOT_SMTP_HOST             = "127.0.0.1"
        PENPOT_SMTP_PORT             = "1025"
        PENPOT_SMTP_USERNAME         = ""
        PENPOT_SMTP_PASSWORD         = ""
        PENPOT_SMTP_TLS              = "false"
        PENPOT_SMTP_SSL              = "false"
      }

      resources {
        cpu        = 8000
        memory     = 1024
        memory_max = 2048
      }
    }
  }

  // penpot-standalone-exporter
  group "exporter" {

    network {
      mode = "bridge"
    }

    task "exporter" {
      driver = "docker"
      config {
        image = "penpotapp/exporter:2.0.1"
      }

      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_REDIS_URI  = "redis://127.0.0.1:6379/0"
      }
    }

    service {
      name = "penpot-standalone-exporter"
      port = "6061"
      tags = ["traefik.enable=false"] # Hide envoy from traefik

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-standalone-redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }
  }

  // penpot-standalone-postgres
  group "postgres" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        to = 5432
      }
    }

    service {
      # Make available to other services by the 'postgres' name
      name = "penpot-standalone-postgres"
      port = "5432"
      tags = ["traefik.enable=false"] # Hide postgres from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide postgres envoy from traefik
        }
      }
    }

    task "postgres" {
      driver = "docker"

      config {
        image = "postgres:16.1-alpine3.19"
        ports = ["ingress"]
      }

      env = {
        POSTGRES_USER     = "penpot"
        POSTGRES_PASSWORD = "not-a-secure-password"
        POSTGRES_DB       = "penpot"
      }
    }
  }

  // penpot-standalone-redis-cache
  group "redis" {
    count = 1

    network {
      mode = "bridge"
      port "redis" {
        to = 6379
      }
    }

    service {
      # Make available to other services by the 'redis-cache' name
      name = "penpot-standalone-redis-cache"
      port = "6379"
      tags = ["traefik.enable=false"] # Hide redis from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide redis envoy from traefik
        }
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7.2.3-alpine"
        ports = ["redis"]
      }
    }
  }

  // penpot-standalone-fake-smtp
  group "mailcatcher" {
    count = 1

    network {
      mode = "bridge"
      port "webUI" {
        to = 1080
      }
    }

    service {
      # Make available to other services by the 'fake-smtp' name
      name = "penpot-standalone-fake-smtp"
      port = "1025"
      tags = ["traefik.enable=false"] # Hide fake-smtp from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide fake-smtp envoy from traefik
        }
      }
    }

    task "mailcatcher" {
      driver = "docker"

      config {
        image = "sj26/mailcatcher:latest"
        ports = ["webUI"]
      }
    }
  }
}
nomad_jobs/services/caddy/caddy.nomad.hcl (new file, 44 lines)
@@ -0,0 +1,44 @@
job "caddy" {
  datacenters = ["dc1"]
  type        = "service"

  group "caddy" {
    count = 1

    network {
      port "http" {
        to = 80
      }
    }

    service {
      name     = "caddy"
      provider = "consul"
      port     = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.caddy.tls=true",
        "traefik.http.routers.caddy.entrypoints=websecure",
        "traefik.http.routers.caddy.rule=Host(`example.local`)"
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "caddy" {
      driver = "docker"

      config {
        image = "caddy:alpine"
        ports = ["http"]
      }
    }
  }
}
nomad_jobs/services/caddy/readme.md (new file, 16 lines)
@@ -0,0 +1,16 @@
# Caddy Web Server

Caddy is a simple and performant web server / reverse proxy written in Go. It is designed to be easy to use and configure. It is great for testing network connectivity and routing, similar to how someone might use nginx just to make sure the host can be reached and can serve content.

While you can absolutely use Caddy as a reverse proxy, Traefik integrates easily with consul for service discovery, so Traefik is used as the reverse proxy here and this nomad job spec is placed behind it.

## Nomad Job for Caddy

There is no Caddy configuration applied in this job spec. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Caddy welcome page.
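As a quick smoke test, you can pin the domain to Traefik's websecure entrypoint without touching DNS; the IP below is a stand-in for your Traefik host:

```bash
# Resolve example.local to the Traefik host and check for Caddy's welcome page.
curl -kI --resolve example.local:443:192.168.1.10 https://example.local
```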
## TODO
If you want to deploy this, you will need to update the domain name in the job spec.

| Line | Default | Adjustment |
| --- | --- | --- |
| 23 | `"traefik.http.routers.caddy.rule=Host('example.local')"` | Change `example.local` to your domain name |
nomad_jobs/services/domainredirect/domainredirect.nomad.hcl (new file, 52 lines)
@@ -0,0 +1,52 @@
locals {
  HOST   = "example.local"
  TARGET = "https://example.com/path"
}

job "domainredirect" {
  datacenters = ["dc1"]
  type        = "service"

  group "domainredirect" {
    count = 1

    network {
      port "http" {
        to = 8080
      }
    }

    service {
      name     = "domainredirect"
      provider = "consul"
      port     = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.domainredirect.tls=true",
        "traefik.http.routers.domainredirect.entrypoints=websecure",
        "traefik.http.routers.domainredirect.rule=Host(`${local.HOST}`)",
      ]

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "domainredirect" {
      driver = "docker"

      config {
        image = "git.cbraaten.dev/caleb/domainredirect:latest"
        ports = ["http"]
      }

      env {
        REDIRECT_TARGET = local.TARGET
      }
    }
  }
}
nomad_jobs/services/domainredirect/readme.md (new file, 18 lines)
@@ -0,0 +1,18 @@
# domainredirect

domainredirect is a simple service that redirects any traffic it receives to the URL you specify. This is useful for redirecting traffic from one domain to another.

## Nomad Job for domainredirect

domainredirect expects a `REDIRECT_TARGET` environment variable to be set. This is the complete URL that it will redirect to. You can set it on line 3 of the job spec.
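Once deployed, one way to verify it (assuming `example.local` resolves to your Traefik host) is to look at the `Location` header it answers with:

```bash
# The response should be a redirect pointing at REDIRECT_TARGET.
curl -ksI https://example.local | grep -i '^location'
```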
## TODO
If you want to deploy this, you will need to update the domain name in the job spec.

| Line | Default | Adjustment |
| --- | --- | --- |
| 2 | `HOST = "example.local"` | Change `example.local` to the domain you are listening on |
| 3 | `TARGET = "https://example.com/path"` | Change `example.com/path` to your destination |

## Request
If you are deploying this regularly, please consider pulling the image and pushing it to your own registry. This will help reduce the load on my registry and keep the image available for everyone. (Although it's not that complex, you could also [build the image yourself](https://git.cbraaten.dev/Caleb/DomainRedirect).)
nomad_jobs/services/gitea/dockerfile_gitea-act-runner (new file, 3 lines)
@@ -0,0 +1,3 @@
FROM gitea/act_runner:0.2.10-dind-rootless
USER root
nomad_jobs/services/gitea/gitea.nomad.hcl (new file, 77 lines)
@@ -0,0 +1,77 @@
locals {
  SUBDOMAIN      = "git." // End with dot or leave blank for root domain
  DOMAIN         = "example.local"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

job "gitea" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        static = 3000
      }
    }

    volume "gitea-data" {
      type   = "host"
      source = "gitea-data"
    }

    service {
      name = "gitea"
      port = "ingress"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.gitea.tls=true",
        "traefik.http.routers.gitea.entrypoints=websecure",
        "traefik.http.routers.gitea.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "gitea" {
      driver = "docker"

      config {
        image = "gitea/gitea:1.22.1"
        ports = ["ingress"]
      }

      volume_mount {
        volume      = "gitea-data"
        destination = "/data"
      }

      resources {
        cpu    = 3000
        memory = 2000
      }
    }
  }
}
nomad_jobs/services/gitea/readme.md (new file, 52 lines)
@@ -0,0 +1,52 @@
# Gitea
Gitea is a self-hosted git service. It is a great alternative to GitHub or GitLab: lightweight, easy to use, and easy to deploy and manage while still providing functionality like SSO and LDAP integration.

Gitea should be configured not to use SSH, as the job spec does not support it; this keeps SSH from being exposed outside of the home network. If you want to use SSH, you will need to modify the job spec to expose the port and configure the service to use it. You can still run git operations over HTTPS.

## Nomad Job for Gitea
You will need to modify the job spec items listed under [TODO](./readme.md#TODO), but there are no Gitea-specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Gitea setup page to make the needed configuration changes.

## Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs)
- [Postgres](../postgres/readme.md)

## TODO
If you want to deploy this, you will need to verify you have a valid host volume.

| Line | Default | Adjustment |
| --- | --- | --- |
| 17 | `source = "gitea-data"` | Change `gitea-data` to a valid host volume name |
| 66 | `volume = "gitea-data"` | Change `gitea-data` to the host volume defined on line 15 if applicable |

> To make the instance accessible through Traefik you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.

## Configuring Gitea
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Gitea. Postgres is made available to the container on the standard `5432` port, so you can select postgres as the database type, use `127.0.0.1:5432` as the address, and input the username, password, and database name you created for Gitea to use.

If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).
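If you would rather create those by hand, a minimal sketch (role and database names are up to you; adjust host and credentials to your deployment):

```bash
# Run against the postgres service and create a role plus database for Gitea.
psql -h 127.0.0.1 -p 5432 -U postgres <<'SQL'
CREATE ROLE gitea LOGIN PASSWORD 'change-me';
CREATE DATABASE gitea OWNER gitea;
SQL
```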
## Adding CI/CD
Gitea has a fork of act runner that can be used to run GitHub Actions. In order to deploy this with Nomad, you will need to leverage Docker in Docker (DinD) with privileged mode enabled in Docker, or pay for the business plan of Docker to get better app isolation. The default runner image provided by Gitea was failing to start the DinD daemon, so I included a dockerfile that you can use to specify that the container should be run as the root user.

1. Build Image
```bash
docker build --network host --platform linux/amd64 -t <your_gitea_domain>/caleb/nomad_act_runner:0.0.1 .
```

> [!NOTE]
> You may not need to specify the platform flag. If you use Apple Silicon but deploy to x86, you will need to include the flag.

2. Push Image
```bash
docker push <your_gitea_domain>/caleb/nomad_act_runner:0.0.1
```

3. Run the nomad job with the Gitea_Runner_Token
```bash
nomad job run -var "grt=<your_token>" -var "domain=<gitea_domain>" runner.nomad.hcl
```

> [!NOTE]
> If you prefer not to use cli variables, you can update the top of the Nomad Job Spec and manually put in the env variables.
nomad_jobs/services/gitea/runner.nomad.hcl (new file, 71 lines)
@@ -0,0 +1,71 @@
variable "grt" {
  type        = string
  description = "Gitea runner token"
}

variable "domain" {
  type        = string
  description = "Gitea Domain Name"
}

locals {
  GITEA_RUNNER_TOKEN = var.grt    # Replace with raw token surrounded by quotes if you don't want to pass via cli or the web ui
  GITEA_DOMAIN       = var.domain # Replace with domain surrounded by quotes if you don't want to pass via cli or the web ui
  GITEA_RUNNER_NAME  = "${NOMAD_TASK_NAME}-${NOMAD_ALLOC_INDEX}"
}

job "gitea-runner" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    scaling {
      enabled = true
      min     = 1
      max     = 5
    }

    network {
      mode = "bridge"
    }

    service {
      name = "gitea-runner"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "gitea"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 3000
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }

    task "gitea-runner" {
      driver = "docker"

      config {
        image      = "${local.GITEA_DOMAIN}/caleb/nomad_act_runner:0.0.1"
        privileged = true
      }

      env = {
        GITEA_INSTANCE_URL              = "http://${NOMAD_UPSTREAM_ADDR_gitea}"
        GITEA_RUNNER_REGISTRATION_TOKEN = "${local.GITEA_RUNNER_TOKEN}"
        GITEA_RUNNER_NAME               = "${local.GITEA_RUNNER_NAME}"
      }

      resources {
        cpu    = 2000
        memory = 2000
      }
    }
  }
}
nomad_jobs/services/jellyfin/jellyfin.nomad.hcl (new file, 89 lines)
@@ -0,0 +1,89 @@
locals {
  SUBDOMAIN      = "jellyfin." // End with dot or leave blank for root domain
  DOMAIN         = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

job "jellyfin" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    network {
      mode = "host"
      port "httpIngress" { static = 8096 }
      port "serviceDiscovery" { static = 1900 }
      port "clientDiscovery" { static = 7359 }
    }

    volume "jellyfin-cache" {
      type   = "host"
      source = "jellyfinCache"
    }

    volume "jellyfin-config" {
      type   = "host"
      source = "jellyfinConfig"
    }

    volume "jellyfin-data" {
      type   = "host"
      source = "media"
    }

    service {
      name = "jellyfin"
      port = "httpIngress"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.jellyfin.tls=true",
        "traefik.http.routers.jellyfin.entrypoints=websecure",
        "traefik.http.routers.jellyfin.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "jellyfin" {
      driver = "docker"

      config {
        image = "jellyfin/jellyfin:2024080505"
        ports = ["httpIngress", "serviceDiscovery", "clientDiscovery"]
      }

      env = {
        JELLYFIN_PublishedServerUrl = "${local.TRAEFIK_DOMAIN}"
      }

      volume_mount {
        volume      = "jellyfin-cache"
        destination = "/cache"
      }

      volume_mount {
        volume      = "jellyfin-config"
        destination = "/config"
      }

      volume_mount {
        volume      = "jellyfin-data"
        destination = "/media"
      }

      resources {
        cpu        = 2000
        memory     = 1024
        memory_max = 2048
      }
    }
  }
}
nomad_jobs/services/jellyfin/readme.md (new file, 36 lines)
@@ -0,0 +1,36 @@
# Jellyfin

Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media. It is an alternative to the proprietary Emby and Plex, providing media from a dedicated server to end-user devices via multiple apps.

## Nomad Job for Jellyfin

You will need to modify the job spec items listed under [TODO](./readme.md#TODO), but there are no Jellyfin-specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Jellyfin setup page to make the needed configuration changes, such as defining the media libraries.

## Service Dependencies

- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Cache
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Config
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Media

## TODO

If you want to deploy this, you will need to verify you have the necessary host volumes.

| Line | Default | Adjustment |
| ---- | --------------------------- | --------------------------------------------------------------------------- |
| 23 | `source = "jellyfinCache"` | Change `jellyfinCache` to a valid host volume name |
| 28 | `source = "jellyfinConfig"` | Change `jellyfinConfig` to a valid host volume name |
| 33 | `source = "media"` | Change `media` to a valid host volume name |
| 68 | `volume = "jellyfinCache"` | Change `jellyfinCache` to the host volume defined on line 21 if applicable |
| 74 | `volume = "jellyfinConfig"` | Change `jellyfinConfig` to the host volume defined on line 26 if applicable |
| 79 | `volume = "media"` | Change `media` to the host volume defined on line 31 if applicable |

> To make the instance accessible through Traefik you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.

## Configuring Jellyfin

There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Jellyfin.

> I recommend using a single root directory for media and then creating subdirectories for each type of media. This will make it easier to manage the media via SFTP and to configure Jellyfin.
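For example, a layout along these lines (the directory names are just a suggestion):

```bash
# One library subdirectory per media type under a single root.
mkdir -p /hdd/multimedia/media/{movies,shows,music}
```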
> If this is deployed on Alpine Linux, you won't be able to pass through dedicated NVIDIA hardware because `nvidia-container-toolkit` is not available on musl. You will need a different root operating system like Ubuntu if you want hardware acceleration with NVIDIA hardware.
nomad_jobs/services/mailcatcher/mailcatcher.nomad.hcl (new file, 36 lines)
@@ -0,0 +1,36 @@
job "mailcatcher" {
  datacenters = ["dc1"]

  group "mailcatcher" {
    count = 1

    network {
      mode = "bridge"
      port "webUI" {
        to = 1080
      }
    }

    service {
      # Make available to other services by the 'fake-smtp' name
      name = "fake-smtp"
      port = "1025"
      tags = ["traefik.enable=false"] # Hide fake-smtp from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide fake-smtp envoy from traefik
        }
      }
    }

    task "mailcatcher" {
      driver = "docker"

      config {
        image = "sj26/mailcatcher:latest"
        ports = ["webUI"]
      }
    }
  }
}
nomad_jobs/services/mailcatcher/readme.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# Mailcatcher
Mailcatcher is a simple SMTP server that catches all mail sent to it and displays it in a web interface. This is useful for developing and testing email-sending services. It is not production ready and should not be used in a production environment.

## Nomad Job for Mailcatcher
Mailcatcher requires no configuration but is only available through the service mesh for sending SMTP mail. The service to connect to is fittingly called `fake-smtp` and is available to localhost on port `1025`. There is no need for an email or password, and the service is not encrypted, so you should not use TLS or SSL. (They might work; I just haven't tried them.) This is not something you should expose to the internet; it's just for testing and development.

Once you have sent an email to the fake SMTP server, you can view it in the web interface. The web interface has a dynamic port assigned by Nomad, so you will need to go to the Task Allocation page in the Nomad UI, where you can open the link under 'WebUI' to view the emails.
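A quick way to exercise it from a task on the mesh is plain `curl`, which can speak SMTP (the addresses here are placeholders):

```bash
# Send a throwaway message to the fake SMTP server, then check the web UI.
printf 'Subject: hello\n\ntest body\n' > /tmp/msg.txt
curl smtp://127.0.0.1:1025 --mail-from test@example.local \
  --mail-rcpt dev@example.local --upload-file /tmp/msg.txt
```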
nomad_jobs/services/minio/minio-singleton.nomad.hcl (new file, 59 lines)
@@ -0,0 +1,59 @@
job "minio-singleton" {
  datacenters = ["dc1"]
  type        = "service"

  group "minio" {
    count = 1

    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }

    service {
      # Make available to other services by the 'minio-singleton' name
      name = "minio-singleton"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio from traefik
        }
      }
    }

    volume "minio-data" {
      type   = "host"
      source = "minio"
    }

    task "minio" {
      driver = "docker"

      volume_mount {
        volume      = "minio-data"
        destination = "/data"
      }

      config {
        image   = "quay.io/minio/minio"
        ports   = ["console"]
        command = "server"
        args    = ["/data", "--console-address", ":9090"]
      }

      resources {
        cpu    = 100
        memory = 2000
      }

      env {
        MINIO_ROOT_USER     = "op://InfraSecrets/Minio-Singleton/username"
        MINIO_ROOT_PASSWORD = "op://InfraSecrets/Minio-Singleton/password"
      }
    }
  }
}
nomad_jobs/services/minio/minio.nomad.hcl (new file, 143 lines)
@@ -0,0 +1,143 @@
# The use of Minio in this stack is not architected for high availability or
# data integrity and as such, is not recommended for production use. Instead,
# this is for making an s3 compatible storage available to the service mesh
# and ZFS is relied upon for data integrity within a single node storage pool.

# For a production ready Minio deployment, please start with the following:
# https://min.io/docs/minio/kubernetes/upstream/operations/concepts/architecture.html

# Note: This configures TWO minio instances, one for "HOT" storage made up of
# SSDs and a "WARM" instance with HDDs instead. Manual configuration of tiers
# is required to make use of this feature. TODO: Automate this.

job "minio" {
  datacenters = ["dc1"]
  type        = "service"

  group "minio" {
    count = 1

    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }

    service {
      # Make available to other services by the 'minio' name
      name = "minio"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio from traefik
        }
      }
    }

    service {
      name = "minio-backend-envoy"
      tags = ["traefik.enable=false"] # Hide minio-backend from traefik
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "minio-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 9001
            }
          }
          tags = ["traefik.enable=false"] # Hide minio-backend from traefik
        }
      }
    }

    volume "minio-data" {
      type   = "host"
      source = "minio-ssd" # Tier 1 Storage Host Volume
    }

    task "minio" {
      driver = "docker"

      volume_mount {
        volume      = "minio-data"
        destination = "/data"
      }

      config {
        image   = "quay.io/minio/minio"
        ports   = ["console"]
        command = "server"
        args    = ["/data", "--console-address", ":9090"]
      }

      resources {
        cpu    = 100
        memory = 2000
      }

      env {
        MINIO_ROOT_USER     = "op://InfraSecrets/Minio Tier 1/username"
        MINIO_ROOT_PASSWORD = "op://InfraSecrets/Minio Tier 1/password"
      }
    }
  }

  group "minio-hdd" {
    count = 1

    network {
      mode = "bridge"
      port "console" {
        to = 9090
      }
    }

    service {
      name = "minio-backend"
      port = "9000"
      tags = ["traefik.enable=false"] # Hide minio-backend from traefik

      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide minio-backend from traefik
        }
      }
    }

    volume "minio-warm-data" {
      type   = "host"
      source = "minio-hdd" # Tier 2 Storage Host Volume
    }

    task "minio-hdd" {
      driver = "docker"

      volume_mount {
        volume      = "minio-warm-data"
        destination = "/data"
      }

      config {
        image   = "quay.io/minio/minio"
        ports   = ["console"]
        command = "server"
        args    = ["/data", "--console-address", ":9090"]
      }

      resources {
        cpu    = 100
        memory = 2000
      }

      env {
        MINIO_ROOT_USER     = "op://InfraSecrets/Minio Tier 2/username"
        MINIO_ROOT_PASSWORD = "op://InfraSecrets/Minio Tier 2/password"
      }
    }
  }
}
nomad_jobs/services/minio/readme.md (new file, 48 lines)
@@ -0,0 +1,48 @@
# Minio
Minio is an open source object storage server that is compatible with Amazon S3. You can use it to store and retrieve data from any application that requires S3 storage. You can configure storage tiers and lifecycle policies to manage your data, with things like retention, expiration, and movement between storage classes.

The use of Minio in this stack is not architected for high availability or data integrity and as such, is not recommended for production use at any reasonably large scale. Instead, this is for making an S3-compatible storage available to the service mesh, with ZFS relied upon for data integrity within a single-node storage pool.

For a production ready Minio deployment, please start with the following:
https://min.io/docs/minio/kubernetes/upstream/operations/concepts/architecture.html

## Nomad Job for Minio
Nomad requires a Host Volume to persist data across restarts. This limits the portability of the running instance, but it is simple to configure. If you want dynamic storage, you will need to modify the job spec to use a different storage driver such as [Ceph](https://docs.ceph.com/en/latest/start/intro/) or [Seaweedfs](https://github.com/seaweedfs/seaweedfs/wiki). Both provide object storage that is S3 compatible, so if you deploy those you may not have a need for Minio, but the admin interface and features of Minio may still meet your needs better.

### Minio-Singleton (minio-singleton.nomad.hcl)
This job is for a single instance of Minio with no tiering. It is the simplest configuration and is suitable for a small amount of data. It is great for getting started with your own S3-compatible storage.

#### Service Dependencies
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs)

#### TODO
If you want to deploy this, you will need to verify you have a valid host volume and set the initial root credentials.

| Line | Default | Adjustment |
| --- | --- | --- |
| 30 | `source = "minio"` | Change `minio` to a valid host volume name if applicable |
| 37 | `volume = "minio-data"` | Change `minio-data` to the host volume defined on line 28 if applicable |
| 54 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio-Singleton/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 55 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio-Singleton/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |

### Minio-Tiered (minio.nomad.hcl)
This job is for an instance of Minio with tiering. If your host has SSDs and HDDs, you can configure Minio to use the SSDs for "HOT" storage and the HDDs for "WARM" storage. This is useful for separating data that is accessed frequently from data that is accessed infrequently.

> IMPORTANT: Tiering is not a backup solution! You should still have a backup strategy for your data. Best to make backups regularly and follow at least the 3-2-1 rule: 3 copies of your data, 2 on different media, 1 offsite.
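Until the tier setup is automated, a hedged sketch of the manual steps with the MinIO client (`mc`); the exact flags drift between `mc` releases, so treat this as a starting point and check `mc admin tier --help`:

```bash
# Point an alias at the hot instance, then register the warm instance as a tier.
mc alias set hot http://127.0.0.1:9000 <root-user> <root-password>
mc admin tier add minio hot WARM \
  --endpoint http://<warm-instance>:9000 \
  --access-key <warm-user> --secret-key <warm-password> \
  --bucket warm-data
# Finish by attaching a lifecycle rule that transitions objects to the WARM tier.
```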
#### Service Dependencies
- Two Valid [Host Volumes](../../../host_init/README.md#storage-and-zfs)

#### TODO
If you want to deploy this, you will need to verify you have valid host volumes and set the initial root credentials.

| Line | Default | Adjustment |
| --- | --- | --- |
| 59 | `source = "minio-ssd"` | Change `minio-ssd` to a valid host volume name if applicable |
| 66 | `volume = "minio-data"` | Change `minio-data` to the host volume defined on line 57 if applicable |
| 83 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio Tier 1/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 84 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio Tier 1/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 113 | `source = "minio-hdd"` | Change `minio-hdd` to a valid host volume name if applicable |
| 121 | `volume = "minio-warm-data"` | Change `minio-warm-data` to the host volume defined on line 111 if applicable |
| 138 | `"MINIO_ROOT_USER"="op://InfraSecrets/Minio Tier 2/username"` | Change the value to the root username you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 139 | `"MINIO_ROOT_PASSWORD"="op://InfraSecrets/Minio Tier 2/password"` | Change the value to the root password you want. By default, this is a 1password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
nomad_jobs/services/penpot/penpot.nomad.hcl (new file, 241 lines)
@@ -0,0 +1,241 @@
# Listening Domain
locals {
  SUBDOMAIN      = "penpot." // End with dot or leave blank for root domain
  DOMAIN         = "example.local"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

# Application routing environment variables
locals {
  PENPOT_PUBLIC_URI        = "https://${local.TRAEFIK_DOMAIN}"
  PENPOT_BACKEND_URI       = "http://127.0.0.1:6060"
  PENPOT_EXPORTER_URI      = "http://127.0.0.1:6061"
  PENPOT_FLAGS             = "enable-smtp enable-registration enable-login-with-password enable-demo-users"
  PENPOT_SECRET_KEY        = "op://InfraSecrets/7hbsxng22unjqc4wkj62qniu2u/credential" # Try running `openssl rand -hex 32` to generate a random secret key
  PENPOT_DATABASE_URI      = "postgresql://127.0.0.1:5432/penpot"
  PENPOT_DATABASE_USERNAME = "op://InfraSecrets/Postgres - Penpot User/username"
  PENPOT_DATABASE_PASSWORD = "op://InfraSecrets/Postgres - Penpot User/password"
  PENPOT_REDIS_URI         = "redis://127.0.0.1:6379/0"
  PENPOT_TELEMETRY_ENABLED = "false"
}

# Assets storage environment variables (fs or s3)
locals {
  // PENPOT_ASSETS_STORAGE_BACKEND = "assets-fs"
  PENPOT_STORAGE_ASSETS_FS_DIRECTORY = "/opt/data/assets"

  PENPOT_ASSETS_STORAGE_BACKEND     = "assets-s3"
  AWS_ACCESS_KEY_ID                 = "op://InfraSecrets/Penpot S3 Key/username"
  AWS_SECRET_ACCESS_KEY             = "op://InfraSecrets/Penpot S3 Key/credential"
  PENPOT_STORAGE_ASSETS_S3_ENDPOINT = "http://127.0.0.1:9000"
  PENPOT_STORAGE_ASSETS_S3_BUCKET   = "penpot"
}

# SMTP environment variables
locals {
  PENPOT_SMTP_DEFAULT_FROM     = "no-reply+penpot@${local.DOMAIN}"
  PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot@${local.DOMAIN}"
  PENPOT_SMTP_HOST             = "127.0.0.1"
  PENPOT_SMTP_PORT             = "1025"
  PENPOT_SMTP_USERNAME         = ""
  PENPOT_SMTP_PASSWORD         = ""
  PENPOT_SMTP_TLS              = "false"
  PENPOT_SMTP_SSL              = "false"
}

job "penpot" {
  datacenters = ["dc1"]

  group "frontend" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        to = 80
      }
    }

    # Expose frontend to internet through traefik
    service {
      name = "penpot"
      port = "ingress"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.penpot.tls=true",
        "traefik.http.routers.penpot.entrypoints=websecure",
        "traefik.http.routers.penpot.rule=Host(`${local.TRAEFIK_DOMAIN}`)",
      ]

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "penpot-backend"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6060
            }
            upstreams {
              destination_name   = "penpot-exporter"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6061
            }
            upstreams {
              destination_name   = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 9000
            }
          }
          tags = ["traefik.enable=false"] # Hide service from traefik
        }
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "frontend" {
      driver = "docker"

      config {
        image = "penpotapp/frontend:2.0.1"
        ports = ["ingress"]
      }

      env {
        PENPOT_PUBLIC_URI   = local.PENPOT_PUBLIC_URI
        PENPOT_BACKEND_URI  = local.PENPOT_BACKEND_URI
        PENPOT_EXPORTER_URI = local.PENPOT_EXPORTER_URI

        PENPOT_FLAGS = local.PENPOT_FLAGS
      }
    }
  }

  group "backend" {

    network {
      mode = "bridge"
    }

    service {
      # Make available to other services by the 'penpot-backend' name
      name = "penpot-backend"
      port = "6060"
      tags = ["traefik.enable=false"] # Hide backend from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
            upstreams {
              destination_name   = "redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
            upstreams {
              destination_name   = "minio"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 9000
            }
            upstreams {
              destination_name   = "fake-smtp"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 1025
            }
          }
          tags = ["traefik.enable=false"] # Hide penpot-backend envoy from traefik
        }
      }
    }

    task "backend" {
      driver = "docker"
      config {
        image = "penpotapp/backend:2.0.1"
        ports = ["ingress"]
      }

      env {
        PENPOT_PUBLIC_URI        = local.PENPOT_PUBLIC_URI
        PENPOT_SECRET_KEY        = local.PENPOT_SECRET_KEY
        PENPOT_DATABASE_URI      = local.PENPOT_DATABASE_URI
        PENPOT_DATABASE_USERNAME = local.PENPOT_DATABASE_USERNAME
        PENPOT_DATABASE_PASSWORD = local.PENPOT_DATABASE_PASSWORD
        PENPOT_REDIS_URI         = local.PENPOT_REDIS_URI
        PENPOT_FLAGS             = local.PENPOT_FLAGS
        PENPOT_TELEMETRY_ENABLED = local.PENPOT_TELEMETRY_ENABLED

        PENPOT_ASSETS_STORAGE_BACKEND      = local.PENPOT_ASSETS_STORAGE_BACKEND
        PENPOT_STORAGE_ASSETS_FS_DIRECTORY = local.PENPOT_STORAGE_ASSETS_FS_DIRECTORY
        AWS_ACCESS_KEY_ID                  = local.AWS_ACCESS_KEY_ID
        AWS_SECRET_ACCESS_KEY              = local.AWS_SECRET_ACCESS_KEY
        PENPOT_STORAGE_ASSETS_S3_ENDPOINT  = local.PENPOT_STORAGE_ASSETS_S3_ENDPOINT
        PENPOT_STORAGE_ASSETS_S3_BUCKET    = local.PENPOT_STORAGE_ASSETS_S3_BUCKET

        PENPOT_SMTP_DEFAULT_FROM     = local.PENPOT_SMTP_DEFAULT_FROM
        PENPOT_SMTP_DEFAULT_REPLY_TO = local.PENPOT_SMTP_DEFAULT_REPLY_TO
        PENPOT_SMTP_HOST             = local.PENPOT_SMTP_HOST
        PENPOT_SMTP_PORT             = local.PENPOT_SMTP_PORT
        PENPOT_SMTP_USERNAME         = local.PENPOT_SMTP_USERNAME
        PENPOT_SMTP_PASSWORD         = local.PENPOT_SMTP_PASSWORD
        PENPOT_SMTP_TLS              = local.PENPOT_SMTP_TLS
        PENPOT_SMTP_SSL              = local.PENPOT_SMTP_SSL
      }

      resources {
        cpu        = 8000
        memory     = 1024
        memory_max = 2048
      }
    }
  }

  group "exporter" {

    network {
      mode = "bridge"
    }

    task "exporter" {
      driver = "docker"
      config {
        image = "penpotapp/exporter:2.0.1"
      }

      env {
        PENPOT_PUBLIC_URI = local.PENPOT_PUBLIC_URI
        PENPOT_REDIS_URI  = local.PENPOT_REDIS_URI
      }
    }

    service {
      name = "penpot-exporter"
      port = "6061"
      tags = ["traefik.enable=false"] # Hide envoy from traefik

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "redis-cache"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 6379
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }
  }
}
21
nomad_jobs/services/penpot/readme.md
Normal file
21
nomad_jobs/services/penpot/readme.md
Normal file
@@ -0,0 +1,21 @@
# Penpot
Penpot is the open-source design and prototyping tool for product teams. It is a great alternative to Figma or Adobe XD that you can host yourself. Learn all about it on their [website](https://penpot.app/).

## Nomad Job for Penpot
Penpot already hosts documentation covering its architecture and deployment, so this readme does not duplicate what you can find on their website, such as the available configuration flags. The nomad spec in this repository defines the frontend, backend, and exporter services. It expects that the service dependencies are already running and reachable by Penpot, and that you have valid postgres credentials.

If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).

## Service Dependencies
- A valid [Host Volume](../../../host_init/README.md#storage-and-zfs) or an S3 storage provider like [Minio](../minio/readme.md)
- [Postgres](../postgres/readme.md)
- [Redis Cache](../redis/readme.md)
- *optional* - An SMTP server like [mailcatcher](../mailcatcher/readme.md) or a legitimate mail server

## Configuring Penpot
Provided you do not need to adjust the services exposed via the consul sidecar, the only edits needed are on lines 1-42. The 'locals' blocks there are referenced throughout the rest of the spec, so all of the configuration happens in one place. Some notable items are as follows:

- TRAEFIK_DOMAIN - The domain you will register with traefik for access to the application
- PENPOT_FLAGS - Remove 'enable_smtp' if you are not going to supply an SMTP configuration
- PENPOT_STORAGE_BACKEND - Configurations are given for both filesystem and minio storage. Including environment variables for both is harmless, but you must set the storage backend to either 'assets-fs' or 'assets-s3' to tell Penpot which one you intend to use (a sketch follows this list). If you are using the filesystem and wish to persist data, make sure your host volumes are properly configured.
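As a point of reference, here is a minimal sketch of what the storage-related locals might look like when targeting Minio over the service mesh. The endpoint and bucket values are illustrative placeholders, not values taken from this repository:

```hcl
locals {
  # "assets-fs" for a host volume, "assets-s3" for Minio/S3
  PENPOT_ASSETS_STORAGE_BACKEND     = "assets-s3"
  PENPOT_STORAGE_ASSETS_S3_ENDPOINT = "http://127.0.0.1:9000" # the minio upstream bound by the sidecar
  PENPOT_STORAGE_ASSETS_S3_BUCKET   = "penpot"                # hypothetical bucket name
}
```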
53
nomad_jobs/services/postgres/postgres.nomad.hcl
Normal file
53
nomad_jobs/services/postgres/postgres.nomad.hcl
Normal file
@@ -0,0 +1,53 @@
job "postgres" {
  datacenters = ["dc1"]
  type        = "service"

  group "database" {
    count = 1

    network {
      mode = "bridge"
      port "ingress" {
        to = 5432
      }
    }

    volume "postgres-data" {
      type   = "host"
      source = "postgres"
    }

    service {
      # Make available to other services by the 'postgres' name
      name = "postgres"
      port = "5432"
      tags = ["traefik.enable=false"] # Hide postgres from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide postgres envoy from traefik
        }
      }
    }

    task "postgres" {
      driver = "docker"

      volume_mount {
        volume      = "postgres-data"
        destination = "/var/lib/postgresql/data"
      }

      config {
        image = "postgres:16.1-alpine3.19"
        ports = ["ingress"]
      }

      env = {
        POSTGRES_USER     = "op://InfraSecrets/Postgres Root/username"
        POSTGRES_PASSWORD = "op://InfraSecrets/Postgres Root/password"
      }
    }
  }
}
36
nomad_jobs/services/postgres/readme.md
Normal file
36
nomad_jobs/services/postgres/readme.md
Normal file
@@ -0,0 +1,36 @@
# Postgres
Postgres is a widely used open-source relational database. This is a single instance of postgres relying on a host volume for storage; it is not a highly available or fault tolerant setup. The only data redundancy is at the storage layer through ZFS, and that is on a single host. If high availability or scalability is a requirement for you, consider a cloud provider like [Neon](https://neon.tech/) or a more robust setup.

## Nomad Job for Postgres
Nomad requires a host volume to persist data across restarts. This limits the portability of the running instance, but it is simple to configure; a sketch of the client-side declaration follows below. If you want dynamic storage, you will need to modify the job spec to use a different storage driver such as [Ceph](https://docs.ceph.com/en/latest/start/intro/) or [Seaweedfs](https://github.com/seaweedfs/seaweedfs/wiki).
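The host volume itself is declared in the Nomad client config during host init. A minimal sketch matching the volume name this spec expects; the path is a hypothetical placeholder, not taken from this repository:

```hcl
client {
  host_volume "postgres" {
    path      = "/ssd/postgres/" # illustrative path; point this at your ZFS dataset
    read_only = false
  }
}
```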

Postgres will have a default root user, which is a good one to use for making application-specific users and databases. The credentials are defined through environment variables in the nomad job spec, so you only need to edit the job spec to meet your requirements. When run, it registers with consul but is hidden from Traefik, meaning you can only access it through the service mesh.

## Service Dependencies
- A valid [Host Volume](../../../host_init/README.md#storage-and-zfs)

## TODO
If you want to deploy this, you will need to verify you have a valid host volume and set the initial postgres root credentials.

| Line | Default | Adjustment |
| --- | --- | --- |
| 17 | `source = "postgres"` | Change `postgres` to a valid host volume name |
| 38 | `volume = "postgres-data"` | Change `postgres-data` to match the volume block defined on line 15, if you renamed it |
| 48 | `POSTGRES_USER = "op://InfraSecrets/Postgres Root/username"` | Change the value to the root username you want. By default, this is a 1Password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |
| 49 | `POSTGRES_PASSWORD = "op://InfraSecrets/Postgres Root/password"` | Change the value to the root password you want. By default, this is a 1Password path. See [Managing Secrets](../../README.md#managing-secrets) for more information |


## Make a New Database
You can easily deploy a second postgres instance by changing the job name on `line 1` and the service name it is exposed as on `line 22`; a sketch follows. You should of course also make the other changes mentioned above in the TODO section so you do not cause resource conflicts or reuse credentials.
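A minimal sketch of those two renames for a hypothetical second instance (the `-app2` suffix is illustrative):

```hcl
job "postgres-app2" {        # line 1: give the job a unique name
  # ...
  service {
    name = "postgres-app2"   # line 22: give the service a unique mesh name
    # ...
  }
}
```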

Alternatively, postgres is a relational database management system (RDBMS), meaning a single instance can actually host multiple databases for different applications. This is not recommended for production environments because it is a single point of failure: if the postgres instance goes down, all the databases go down. It is, however, a good way to reduce the overhead of running multiple database instances.

You can make a new user and database by entering the exec shell of the postgres container through the nomad UI and running psql with the root credentials. From there, run the following example commands to make a user and database for your application:

```sql
CREATE USER appname WITH PASSWORD 'not-a-secure-password';
CREATE DATABASE appname WITH OWNER appname;
```

The user and database can share the same name because they are records in different tables, but feel free to name them whatever you think is best.
7
nomad_jobs/services/redis/readme.md
Normal file
7
nomad_jobs/services/redis/readme.md
Normal file
@@ -0,0 +1,7 @@
# Redis
Redis is a Remote Dictionary Server (that is where Redis gets its name) that is open source and widely used. This is a single in-memory instance used primarily as a caching layer for other services where data does not need to be persisted. It is not a highly available or fault tolerant setup. If high availability, data persistence, or scalability is a requirement for you, consider a cloud provider like [upstash](https://upstash.com/) or a more robust setup.

## Nomad Job for Redis
Redis requires no configuration but is only reachable through the service mesh, so any service that needs redis must join the mesh to connect (a sketch of the upstream stanza follows). This is a good thing: you can easily deploy a redis instance for your application without having to worry about exposing it.
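A consuming job declares redis as an upstream in its own sidecar, exactly as the Penpot backend spec in this repository does:

```hcl
connect {
  sidecar_service {
    proxy {
      upstreams {
        destination_name   = "redis-cache" # the service name registered below
        local_bind_address = "127.0.0.1"
        local_bind_port    = 6379          # redis is then reachable at 127.0.0.1:6379
      }
    }
  }
}
```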

If you need to use the CLI, you can access it through Nomad's exec shell. The exec shell defaults to /bin/bash, which does not exist on alpine linux, so you will need to change it to /bin/ash. Once you are in the shell, run the redis-cli command to connect to the redis instance.
37
nomad_jobs/services/redis/redis-cache.nomad.hcl
Normal file
37
nomad_jobs/services/redis/redis-cache.nomad.hcl
Normal file
@@ -0,0 +1,37 @@
job "redis-cache" {
  datacenters = ["dc1"]

  group "cache" {
    count = 1

    network {
      mode = "bridge"
      port "redis" {
        to = 6379
      }
    }

    service {
      # Make available to other services by the 'redis-cache' name
      name = "redis-cache"
      port = "6379"
      tags = ["traefik.enable=false"] # Hide redis from traefik

      # Make available through the consul service mesh
      connect {
        sidecar_service {
          tags = ["traefik.enable=false"] # Hide redis envoy from traefik
        }
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7.2.3-alpine"
        ports = ["redis"]
      }
    }
  }
}
90
nomad_jobs/services/umami/umami.nomad.hcl
Normal file
90
nomad_jobs/services/umami/umami.nomad.hcl
Normal file
@@ -0,0 +1,90 @@
# Listening Domain
locals {
  SUBDOMAIN      = "umami." // End with a dot, or leave blank for the root domain
  DOMAIN         = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

// OP is the 1Password CLI
locals {
  OP_DB_USER     = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresUsername"
  OP_DB_PASSWORD = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresPassword"
  OP_AppSecret   = "op://InfraSecrets/Umami/ENV_SECRETS/AppSecret"
}

locals {
  USER_PASSWORD = "${local.OP_DB_USER}:${local.OP_DB_PASSWORD}"

  UMAMI_APPSECRET = "${local.OP_AppSecret}"
  UMAMI_DB_URL    = "postgresql://${local.USER_PASSWORD}@127.0.0.1:5432/umami"
  UMAMI_DB_TYPE   = "postgresql"
}

job "umami" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    network {
      mode = "bridge"
      port "httpIngress" {
        to = 3000
      }
    }

    service {
      name = "umami"
      port = "httpIngress"

      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.umami.tls=true",
        "traefik.http.routers.umami.entrypoints=websecure",
        "traefik.http.routers.umami.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]

      check {
        type     = "http"
        path     = "/api/heartbeat"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "umami" {
      driver = "docker"

      config {
        image = "ghcr.io/umami-software/umami:postgresql-latest"
        ports = ["httpIngress"]
      }

      env = {
        DATABASE_URL  = "${local.UMAMI_DB_URL}"
        DATABASE_TYPE = "${local.UMAMI_DB_TYPE}"
        APP_SECRET    = "${local.UMAMI_APPSECRET}"
      }

      resources {
        cpu        = 1000
        memory     = 512
        memory_max = 1024
      }
    }
  }
}
@@ -5,7 +5,7 @@
- name: Update consul config
  ansible.builtin.copy:
    mode: preserve
    src: ./host_config/consul.hcl
    src: ../host_config/consul.hcl
    dest: /etc/consul/server.hcl

- name: Restart consul service

@@ -5,7 +5,7 @@
- name: Update nomad config
  ansible.builtin.copy:
    mode: preserve
    src: ./host_config/nomad.hcl
    src: ../host_config/nomad.hcl
    dest: /etc/nomad.d/server.hcl

- name: Restart nomad service

@@ -5,7 +5,7 @@
- name: Update traefik config
  ansible.builtin.copy:
    mode: preserve
    src: ./host_config/traefik.yml
    src: ../host_config/traefik.yml
    dest: /etc/traefik/traefik.yaml # Alpine default config is yaml

- name: Restart traefik service