Compare commits
16 Commits
01337c1e84
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 696813fc72 | |||
| d099c4dcc2 | |||
| 177d7b480c | |||
| 641beb1f81 | |||
| 0e5712a05e | |||
| 318c8e5ae1 | |||
| 75fcfb90bf | |||
| 41c1543979 | |||
| 8663b87f0f | |||
| d4e2b4a241 | |||
| 695acaa48a | |||
| 71c46ad953 | |||
| fdacbec5ea | |||
| 16bb096e34 | |||
| 49a25fff41 | |||
| 829bb876e0 |
@@ -50,6 +50,30 @@ client {
|
||||
path = "/ssd/sqlite/"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
host_volume "jellyfinCache" {
|
||||
path = "/hdd/multimedia/cache/"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
host_volume "jellyfinConfig" {
|
||||
path = "/hdd/multimedia/config/"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
host_volume "media" {
|
||||
path = "/hdd/multimedia/media/"
|
||||
read_only = false
|
||||
}
|
||||
}
|
||||
|
||||
plugin "docker" {
|
||||
config {
|
||||
allow_privileged = true
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ui {
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
- name: Enable community packages
|
||||
ansible.builtin.lineinfile:
|
||||
path: /etc/apk/repositories
|
||||
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
state: present
|
||||
|
||||
- name: Update apk packages
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
- name: Enable community packages
|
||||
ansible.builtin.lineinfile:
|
||||
path: /etc/apk/repositories
|
||||
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
state: present
|
||||
|
||||
- name: Update apk packages
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
- name: Enable community packages
|
||||
ansible.builtin.lineinfile:
|
||||
path: /etc/apk/repositories
|
||||
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
state: present
|
||||
|
||||
- name: Update apk packages
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
- name: Install Caddy on Alpine Linux
|
||||
- name: Install Traefik on Alpine Linux
|
||||
hosts: all
|
||||
|
||||
tasks:
|
||||
- name: Enable community packages
|
||||
ansible.builtin.lineinfile:
|
||||
path: /etc/apk/repositories
|
||||
regexp: '^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
line: 'http://dl-cdn.alpinelinux.org/alpine/v3.18/community'
|
||||
regexp: "^#http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
line: "http://dl-cdn.alpinelinux.org/alpine/v3.18/community"
|
||||
state: present
|
||||
|
||||
- name: Update apk packages
|
||||
|
||||
@@ -65,7 +65,7 @@ job "penpot-standalone" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "penpotapp/frontend:1.19.3"
|
||||
image = "penpotapp/frontend:2.0.1"
|
||||
ports = ["ingress"]
|
||||
}
|
||||
|
||||
@@ -120,7 +120,7 @@ job "penpot-standalone" {
|
||||
task "backend" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "penpotapp/backend:1.19.3"
|
||||
image = "penpotapp/backend:2.0.1"
|
||||
ports = ["ingress"]
|
||||
}
|
||||
|
||||
@@ -165,7 +165,7 @@ job "penpot-standalone" {
|
||||
task "exporter" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "penpotapp/exporter:1.19.3"
|
||||
image = "penpotapp/exporter:2.0.1"
|
||||
}
|
||||
|
||||
env {
|
||||
|
||||
3
nomad_jobs/services/gitea/dockerfile_gitea-act-runner
Normal file
3
nomad_jobs/services/gitea/dockerfile_gitea-act-runner
Normal file
@@ -0,0 +1,3 @@
|
||||
FROM gitea/act_runner:0.2.10-dind-rootless
|
||||
USER root
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
locals {
|
||||
SUBDOMAIN = "git." // End with dot or leave blamk for root domain
|
||||
DOMAIN = "example.local"
|
||||
TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
|
||||
}
|
||||
|
||||
job "gitea" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
@@ -8,7 +14,7 @@ job "gitea" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "ingress" {
|
||||
to = 3000
|
||||
static = 3000
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,7 +44,7 @@ job "gitea" {
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.gitea.tls=true",
|
||||
"traefik.http.routers.gitea.entrypoints=websecure",
|
||||
"traefik.http.routers.gitea.rule=Host(`git.example.local`)"
|
||||
"traefik.http.routers.gitea.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
|
||||
]
|
||||
|
||||
check {
|
||||
@@ -53,7 +59,7 @@ job "gitea" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "gitea/gitea:1.21.1"
|
||||
image = "gitea/gitea:1.22.1"
|
||||
ports = ["ingress"]
|
||||
}
|
||||
|
||||
@@ -61,6 +67,11 @@ job "gitea" {
|
||||
volume = "gitea-data"
|
||||
destination = "/data"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 3000
|
||||
memory = 2000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,16 +11,42 @@ You will need to modify the job spec items listed under [TODO](./readme.md#TODO)
|
||||
- [Postgres](../postgres/readme.md)
|
||||
|
||||
## TODO
|
||||
If you want to deploy this, you will need to verify you have a valid host volume and update the domain name in the job spec.
|
||||
If you want to deploy this, you will need to verify you have a valid host volume.
|
||||
|
||||
| Line | Default | Adjustment |
|
||||
| --- | --- | --- |
|
||||
| 17 | `source = "gitea-data"` | Change `gitea-data` to a valid host volume name |
|
||||
| 46 | `"traefik.http.routers.caddy.rule=Host('git.example.com')"` | Change `git.example.com` to your domain name |
|
||||
| 66 | `volume = "gitea-data"` | Change `gitea-data` to the host volume defined on line 15 if applicable |
|
||||
|
||||
> To make the instance accessible through TRAEFIK you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.
|
||||
|
||||
|
||||
## Configuring Gitea
|
||||
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Gitea. Postgres should be mounted to the container on the standard `5432` port so you can select postgres as the database type and use `127.0.0.1:5432` as the address and input the username, password, and database name you created for Gitea to use.
|
||||
|
||||
If you need help making those credentials, take a look at the [postgres readme](../postgres/readme.md#make-a-new-database).
|
||||
|
||||
# Adding CI/CD
|
||||
Gitea has a fork of act runner that can be used to run GitHub Actions. In order to deploy this with Nomad, you will need to leverage Docker in Docker (DinD) with privileged mode enabled in Docker, or pay for the business plan of Docker to have better app isolation. The default runner image provided by Gitea was failing to start the DinD daemon, so I included a dockerfile that you can use to specify that the container should be run as the root user.
|
||||
|
||||
1. Build Image
|
||||
```bash
|
||||
docker build --network host --platform linux/amd64 -t <your_gitea_domain>/caleb/nomad_act_runner:0.0.1 .
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> You may not need to specify the platform flag. If you use Apple Silicon but deploy to X86, you will need to include the flag.
|
||||
|
||||
2. Push Image
|
||||
```bash
|
||||
docker push <your_gitea_domain>/caleb/nomad_act_runner:0.0.1
|
||||
```
|
||||
|
||||
3. Run the nomad job with the Gitea_Runner_Token
|
||||
```bash
|
||||
nomad job run -var "grt=<your_token>" -var "domain=<gitea_domain>" runner.nomad.hcl
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> If you prefer to not use cli variables, you can update the top of the Nomad Job Spec and manually put in the env variables.
|
||||
|
||||
|
||||
71
nomad_jobs/services/gitea/runner.nomad.hcl
Normal file
71
nomad_jobs/services/gitea/runner.nomad.hcl
Normal file
@@ -0,0 +1,71 @@
|
||||
# Deployment inputs — both can be supplied via `nomad job run -var ...`.
variable "grt" {
  type        = string
  description = "Gitea runner token"
}

variable "domain" {
  type        = string
  description = "Gitea Domain Name"
}

locals {
  GITEA_RUNNER_TOKEN = var.grt    # Replace with raw token surrounded by quotes if you don't want to pass via cli or using web ui
  GITEA_DOMAIN       = var.domain # Replace with domain surrounded by quotes if you don't want to pass via cli or using web ui
  # NOTE(review): NOMAD_TASK_NAME / NOMAD_ALLOC_INDEX are runtime variables —
  # confirm they interpolate as expected when referenced from a locals block.
  GITEA_RUNNER_NAME = "${NOMAD_TASK_NAME}-${NOMAD_ALLOC_INDEX}"
}

job "gitea-runner" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    # Runner pool may scale between one and five allocations.
    scaling {
      enabled = true
      min     = 1
      max     = 5
    }

    network {
      mode = "bridge"
    }

    service {
      name = "gitea-runner"

      # Consul Connect sidecar exposing the gitea service on localhost:3000.
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "gitea"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 3000
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }
    }

    task "gitea-runner" {
      driver = "docker"

      config {
        image      = "${local.GITEA_DOMAIN}/caleb/nomad_act_runner:0.0.1"
        privileged = true
      }

      env = {
        GITEA_INSTANCE_URL              = "http://${NOMAD_UPSTREAM_ADDR_gitea}"
        GITEA_RUNNER_REGISTRATION_TOKEN = "${local.GITEA_RUNNER_TOKEN}"
        GITEA_RUNNER_NAME               = "${local.GITEA_RUNNER_NAME}"
      }

      resources {
        cpu    = 2000
        memory = 2000
      }
    }
  }
}
|
||||
89
nomad_jobs/services/jellyfin/jellyfin.nomad.hcl
Normal file
89
nomad_jobs/services/jellyfin/jellyfin.nomad.hcl
Normal file
@@ -0,0 +1,89 @@
|
||||
locals {
  SUBDOMAIN      = "jellyfin." // End with dot or leave blank for root domain
  DOMAIN         = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

job "jellyfin" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    # Host networking; 1900/7359 are Jellyfin's discovery ports.
    network {
      mode = "host"
      port "httpIngress" { static = 8096 }
      port "serviceDiscovery" { static = 1900 }
      port "clientDiscovery" { static = 7359 }
    }

    volume "jellyfin-cache" {
      type   = "host"
      source = "jellyfinCache"
    }

    volume "jellyfin-config" {
      type   = "host"
      source = "jellyfinConfig"
    }

    volume "jellyfin-data" {
      type   = "host"
      source = "media"
    }

    service {
      name = "jellyfin"
      port = "httpIngress"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.jellyfin.tls=true",
        "traefik.http.routers.jellyfin.entrypoints=websecure",
        "traefik.http.routers.jellyfin.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "jellyfin" {
      driver = "docker"

      config {
        image = "jellyfin/jellyfin:2024080505"
        ports = ["httpIngress", "serviceDiscovery", "clientDiscovery"]
      }

      env = {
        JELLYFIN_PublishedServerUrl = "${local.TRAEFIK_DOMAIN}"
      }

      volume_mount {
        volume      = "jellyfin-cache"
        destination = "/cache"
      }

      volume_mount {
        volume      = "jellyfin-config"
        destination = "/config"
      }

      volume_mount {
        volume      = "jellyfin-data"
        destination = "/media"
      }

      resources {
        cpu        = 2000
        memory     = 1024
        memory_max = 2048
      }
    }
  }
}
|
||||
36
nomad_jobs/services/jellyfin/readme.md
Normal file
36
nomad_jobs/services/jellyfin/readme.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Jellyfin
|
||||
|
||||
Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media. It is an alternative to the proprietary Emby and Plex, to provide media from a dedicated server to end-user devices via multiple apps.
|
||||
|
||||
## Nomad Job for Jellyfin
|
||||
|
||||
You will need to modify the job spec items listed under [TODO](./readme.md#TODO) but there are no Jellyfin specific adjustments needed. If you run it, it will register with consul and be available to Traefik for routing. If the domain name is configured correctly, you should be able to reach the Jellyfin setup page to make the needed configuration changes such as defining the media libraries.
|
||||
|
||||
## Service Dependencies
|
||||
|
||||
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Cache
|
||||
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Config
|
||||
- A Valid [Host Volume](../../../host_init/README.md#storage-and-zfs) For Media
|
||||
|
||||
## TODO
|
||||
|
||||
If you want to deploy this, you will need to verify you have the necessary host volumes.
|
||||
|
||||
| Line | Default | Adjustment |
|
||||
| ---- | --------------------------- | --------------------------------------------------------------------------- |
|
||||
| 23 | `source = "jellyfinCache"` | Change `jellyfinCache` to a valid host volume name |
|
||||
| 28 | `source = "jellyfinConfig"` | Change `jellyfinConfig` to a valid host volume name |
|
||||
| 33 | `source = "media"` | Change `media` to a valid host volume name |
|
||||
| 68 | `volume = "jellyfinCache"` | Change `jellyfinCache` to the host volume defined on line 21 if applicable |
|
||||
| 74 | `volume = "jellyfinConfig"` | Change `jellyfinConfig` to the host volume defined on line 26 if applicable |
|
||||
| 79 | `volume = "media"` | Change `media` to the host volume defined on line 31 if applicable |
|
||||
|
||||
> To make the instance accessible through TRAEFIK you will need to define the domain to listen on by setting the value(s) on lines 2 and 3.
|
||||
|
||||
## Configuring Jellyfin
|
||||
|
||||
There is no need to embed secrets in the nomad job spec. When you first visit the domain name you configured, you will be prompted to configure Jellyfin.
|
||||
|
||||
> I recommend using a single root directory for media and then creating subdirectories for each type of media. This will make it easier to manage the media via SFTP and to configure Jellyfin.
|
||||
|
||||
> If this is deployed on Alpine Linux, you won't be able to pass through dedicated NVIDIA hardware because `nvidia-container-toolkit` is not available on MUSL. You will need to use a different root operating system like Ubuntu if you want hardware acceleration with NVIDIA hardware.
|
||||
@@ -1,6 +1,8 @@
|
||||
# Listening Domain
|
||||
locals {
|
||||
TRAEFIK_DOMAIN = "penpot.example.local"
|
||||
SUBDOMAIN = "penpot." // End with dot or leave blamk for root domain
|
||||
DOMAIN = "example.local"
|
||||
TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
|
||||
}
|
||||
|
||||
# Application routing environment variables
|
||||
@@ -8,7 +10,7 @@ locals {
|
||||
PENPOT_PUBLIC_URI = "https://${local.TRAEFIK_DOMAIN}"
|
||||
PENPOT_BACKEND_URI = "http://127.0.0.1:6060"
|
||||
PENPOT_EXPORTER_URI = "http://127.0.0.1:6061"
|
||||
PENPOT_FLAGS = "enable-smtp enable-registration enable-login-with-password enable-prepl-server enable-demo-users"
|
||||
PENPOT_FLAGS = "enable-smtp enable-registration enable-login-with-password enable-demo-users"
|
||||
PENPOT_SECRET_KEY = "op://InfraSecrets/7hbsxng22unjqc4wkj62qniu2u/credential" # Try running `openssl rand -hex 32` to generate a random secret key
|
||||
PENPOT_DATABASE_URI = "postgresql://127.0.0.1:5432/penpot"
|
||||
PENPOT_DATABASE_USERNAME = "op://InfraSecrets/Postgres - Penpot User/username"
|
||||
@@ -31,8 +33,8 @@ locals {
|
||||
|
||||
# SMTP environment variables
|
||||
locals {
|
||||
PENPOT_SMTP_DEFAULT_FROM = "no-reply+penpot@example.local"
|
||||
PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot@example.local"
|
||||
PENPOT_SMTP_DEFAULT_FROM = "no-reply+penpot@${local.DOMAIN}"
|
||||
PENPOT_SMTP_DEFAULT_REPLY_TO = "no-reply+penpot@${local.DOMAIN}"
|
||||
PENPOT_SMTP_HOST = "127.0.0.1"
|
||||
PENPOT_SMTP_PORT = "1025"
|
||||
PENPOT_SMTP_USERNAME = ""
|
||||
@@ -101,7 +103,7 @@ job "penpot" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "penpotapp/frontend:1.19.3"
|
||||
image = "penpotapp/frontend:2.0.1"
|
||||
ports = ["ingress"]
|
||||
}
|
||||
|
||||
@@ -160,7 +162,7 @@ job "penpot" {
|
||||
task "backend" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "penpotapp/backend:1.19.3"
|
||||
image = "penpotapp/backend:2.0.1"
|
||||
ports = ["ingress"]
|
||||
}
|
||||
|
||||
@@ -208,7 +210,7 @@ job "penpot" {
|
||||
task "exporter" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "penpotapp/exporter:1.19.3"
|
||||
image = "penpotapp/exporter:2.0.1"
|
||||
}
|
||||
|
||||
env {
|
||||
|
||||
90
nomad_jobs/services/umami/umami.nomad.hcl
Normal file
90
nomad_jobs/services/umami/umami.nomad.hcl
Normal file
@@ -0,0 +1,90 @@
|
||||
# Listening Domain
locals {
  # FIX: trailing dot added — without it TRAEFIK_DOMAIN concatenates to
  # "umamiexample.com". Matches the convention in the sibling job specs.
  SUBDOMAIN      = "umami." // End with dot or leave blank for root domain
  DOMAIN         = "example.com"
  TRAEFIK_DOMAIN = "${local.SUBDOMAIN}${local.DOMAIN}"
}

// OP is 1Password for CLI
locals {
  OP_DB_USER     = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresUsername"
  OP_DB_PASSWORD = "op://InfraSecrets/Umami/ENV_SECRETS/PostgresPassword"
  OP_AppSecret   = "op://InfraSecrets/Umami/ENV_SECRETS/AppSecret"
}

locals {
  USER_PASSWORD = "${local.OP_DB_USER}:${local.OP_DB_PASSWORD}"

  UMAMI_APPSECRET = "${local.OP_AppSecret}"
  UMAMI_DB_URL    = "postgresql://${local.USER_PASSWORD}@127.0.0.1:5432/umami"
  UMAMI_DB_TYPE   = "postgresql"
}

job "umami" {
  datacenters = ["dc1"]
  type        = "service"

  group "application" {
    count = 1

    network {
      mode = "bridge"
      port "httpIngress" {
        to = 3000
      }
    }

    service {
      name = "umami"
      port = "httpIngress"

      # Consul Connect sidecar exposing postgres on localhost:5432.
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name   = "postgres"
              local_bind_address = "127.0.0.1"
              local_bind_port    = 5432
            }
          }
          tags = ["traefik.enable=false"] # Hide envoy from traefik
        }
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.umami.tls=true",
        "traefik.http.routers.umami.entrypoints=websecure",
        "traefik.http.routers.umami.rule=Host(`${local.TRAEFIK_DOMAIN}`)"
      ]

      check {
        type     = "http"
        path     = "/api/heartbeat"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "umami" {
      driver = "docker"

      config {
        image = "ghcr.io/umami-software/umami:postgresql-latest"
        ports = ["httpIngress"]
      }

      env = {
        DATABASE_URL  = "${local.UMAMI_DB_URL}"
        DATABASE_TYPE = "${local.UMAMI_DB_TYPE}"
        # FIX: was `APP_SECRET:"..."` — use `=` like the sibling keys.
        APP_SECRET = "${local.UMAMI_APPSECRET}"
      }

      resources {
        cpu        = 1000
        memory     = 512
        memory_max = 1024
      }
    }
  }
}
||||
@@ -5,7 +5,7 @@
|
||||
- name: Update consul config
|
||||
ansible.builtin.copy:
|
||||
mode: preserve
|
||||
src: ./host_config/consul.hcl
|
||||
src: ../host_config/consul.hcl
|
||||
dest: /etc/consul/server.hcl
|
||||
|
||||
- name: Restart consul service
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
- name: Update nomad config
|
||||
ansible.builtin.copy:
|
||||
mode: preserve
|
||||
src: ./host_config/nomad.hcl
|
||||
src: ../host_config/nomad.hcl
|
||||
dest: /etc/nomad.d/server.hcl
|
||||
|
||||
- name: Restart nomad service
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
- name: Update traefik config
|
||||
ansible.builtin.copy:
|
||||
mode: preserve
|
||||
src: ./host_config/traefik.yml
|
||||
src: ../host_config/traefik.yml
|
||||
dest: /etc/traefik/traefik.yaml # Alpine default config is yaml
|
||||
|
||||
- name: Restart traefik service
|
||||
|
||||
Reference in New Issue
Block a user