commit eab95c8c80
parent 1c994f9f60
2025-10-09 01:22:22 +00:00
136 changed files with 11001 additions and 849 deletions

View File

@@ -2,10 +2,25 @@ job "consul-cluster-nomad" {
datacenters = ["dc1"]
type = "service"
group "consul-master" {
group "consul-ch4" {
constraint {
attribute = "${node.unique.name}"
value = "master"
value = "ch4"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
@@ -16,18 +31,18 @@ job "consul-cluster-nomad" {
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-bootstrap-expect=2",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind=100.117.106.136",
"-advertise=100.117.106.136",
"-retry-join=100.116.80.94",
"-retry-join=100.122.197.112",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ash3c.tailnet-68f9.ts.net:8301",
"-retry-join=warden.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",
"-serf-lan-port=8301",
"-serf-wan-port=8302"
"-serf-wan-port=8302",
]
}
@@ -45,6 +60,21 @@ job "consul-cluster-nomad" {
value = "ash3c"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
@@ -53,13 +83,12 @@ job "consul-cluster-nomad" {
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind=100.116.80.94",
"-advertise=100.116.80.94",
"-retry-join=100.117.106.136",
"-retry-join=100.122.197.112",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ch4.tailnet-68f9.ts.net:8301",
"-retry-join=warden.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",
@@ -82,6 +111,21 @@ job "consul-cluster-nomad" {
value = "warden"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
@@ -90,13 +134,12 @@ job "consul-cluster-nomad" {
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind=100.122.197.112",
"-advertise=100.122.197.112",
"-retry-join=100.117.106.136",
"-retry-join=100.116.80.94",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ch4.tailnet-68f9.ts.net:8301",
"-retry-join=ash3c.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",

View File

@@ -0,0 +1,158 @@
job "consul-cluster-nomad" {
datacenters = ["dc1"]
type = "service"
group "consul-ch4" {
constraint {
attribute = "${node.unique.name}"
value = "ch4"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ash3c.tailnet-68f9.ts.net:8301",
"-retry-join=warden.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",
"-serf-lan-port=8301",
"-serf-wan-port=8302"
]
}
resources {
cpu = 300
memory = 512
}
}
}
group "consul-ash3c" {
constraint {
attribute = "${node.unique.name}"
value = "ash3c"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ch4.tailnet-68f9.ts.net:8301",
"-retry-join=warden.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",
"-serf-lan-port=8301",
"-serf-wan-port=8302"
]
}
resources {
cpu = 300
memory = 512
}
}
}
group "consul-warden" {
constraint {
attribute = "${node.unique.name}"
value = "warden"
}
network {
port "http" {
static = 8500
}
port "server" {
static = 8300
}
port "serf-lan" {
static = 8301
}
port "serf-wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-data-dir=/opt/nomad/data/consul",
"-client=0.0.0.0",
"-bind={{ env "NOMAD_IP_http" }}",
"-advertise={{ env "NOMAD_IP_http" }}",
"-retry-join=ch4.tailnet-68f9.ts.net:8301",
"-retry-join=ash3c.tailnet-68f9.ts.net:8301",
"-ui",
"-http-port=8500",
"-server-port=8300",
"-serf-lan-port=8301",
"-serf-wan-port=8302"
]
}
resources {
cpu = 300
memory = 512
}
}
}
}
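A quick way to confirm the three groups above formed a working cluster is to query any of the static HTTP ports they expose; a minimal sketch, reusing the tailnet hostnames already present in the retry-join flags:

# Expect a leader address back, and ch4 / ash3c / warden listed as alive servers
curl -s http://ch4.tailnet-68f9.ts.net:8500/v1/status/leader
consul members -http-addr=http://ch4.tailnet-68f9.ts.net:8500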

View File

@@ -1,110 +0,0 @@
job "install-podman-driver" {
datacenters = ["dc1"]
type = "system" # 在所有节点上运行
group "install" {
task "install-podman" {
driver = "exec"
config {
command = "bash"
args = [
"-c",
<<-EOF
set -euo pipefail
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
# Required tools
if ! command -v jq >/dev/null 2>&1 || ! command -v unzip >/dev/null 2>&1 || ! command -v wget >/dev/null 2>&1; then
echo "Installing dependencies (jq unzip wget)..."
sudo -n apt update -y || true
sudo -n apt install -y jq unzip wget || true
fi
# Install Podman (if not already installed)
if ! command -v podman >/dev/null 2>&1; then
echo "Installing Podman..."
sudo -n apt update -y || true
sudo -n apt install -y podman || true
sudo -n systemctl enable podman || true
else
echo "Podman already installed"
fi
# Enable and start podman.socket (so Nomad can reach it)
sudo -n systemctl enable --now podman.socket || true
if getent group podman >/dev/null 2>&1; then
sudo -n usermod -aG podman nomad || true
fi
# Install the Nomad Podman driver plugin (always make sure it is present)
PODMAN_DRIVER_VERSION="0.6.1"
PLUGIN_DIR="/opt/nomad/data/plugins"
sudo -n mkdir -p "${PLUGIN_DIR}" || true
cd /tmp
if [ ! -x "${PLUGIN_DIR}/nomad-driver-podman" ]; then
echo "Installing nomad-driver-podman ${PODMAN_DRIVER_VERSION}..."
wget -q "https://releases.hashicorp.com/nomad-driver-podman/${PODMAN_DRIVER_VERSION}/nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
unzip -o "nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
sudo -n mv -f nomad-driver-podman "${PLUGIN_DIR}/"
sudo -n chmod +x "${PLUGIN_DIR}/nomad-driver-podman"
sudo -n chown -R nomad:nomad "${PLUGIN_DIR}"
rm -f "nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
else
echo "nomad-driver-podman already present in ${PLUGIN_DIR}"
fi
# Update the plugin_dir setting in /etc/nomad.d/nomad.hcl
if [ -f /etc/nomad.d/nomad.hcl ]; then
if grep -q "^plugin_dir\s*=\s*\"" /etc/nomad.d/nomad.hcl; then
sudo -n sed -i 's#^plugin_dir\s*=\s*\".*\"#plugin_dir = "/opt/nomad/data/plugins"#' /etc/nomad.d/nomad.hcl || true
else
echo 'plugin_dir = "/opt/nomad/data/plugins"' | sudo -n tee -a /etc/nomad.d/nomad.hcl >/dev/null || true
fi
fi
# Restart the Nomad service to load the plugin
sudo -n systemctl restart nomad || true
echo "Waiting for Nomad to restart..."
sleep 15
# Check whether Nomad has detected the Podman driver
if /usr/local/bin/nomad node status -self -json 2>/dev/null | jq -r '.Drivers.podman.Detected' | grep -q "true"; then
echo "Podman driver successfully loaded"
exit 0
fi
echo "Podman driver not detected yet, retrying once after socket restart..."
sudo -n systemctl restart podman.socket || true
sleep 5
if /usr/local/bin/nomad node status -self -json 2>/dev/null | jq -r '.Drivers.podman.Detected' | grep -q "true"; then
echo "Podman driver successfully loaded after socket restart"
exit 0
else
echo "Podman driver still not detected; manual investigation may be required"
exit 1
fi
EOF
]
}
resources {
cpu = 200
memory = 256
}
// Run as root
// user = "root"
# Run the task as the nomad user, to avoid client policies that forbid root
user = "nomad"
# Make sure the task completes successfully
restart {
attempts = 1
interval = "24h"
delay = "60s"
mode = "fail"
}
}
}
}
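If this install job reports the driver as missing on a node, its final detection step can be repeated by hand; a sketch of the same checks the script performs:

# Verify Nomad detected the podman driver after the plugin install
nomad node status -self -json | jq -r '.Drivers.podman.Detected'
# Confirm the plugin binary and the plugin_dir setting written by the script
ls -l /opt/nomad/data/plugins/nomad-driver-podman
grep plugin_dir /etc/nomad.d/nomad.hcl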

View File

@@ -0,0 +1,43 @@
job "juicefs-controller" {
datacenters = ["dc1"]
type = "system"
group "controller" {
task "plugin" {
driver = "podman"
config {
image = "juicedata/juicefs-csi-driver:v0.14.1"
args = [
"--endpoint=unix://csi/csi.sock",
"--logtostderr",
"--nodeid=${node.unique.id}",
"--v=5",
"--by-process=true"
]
privileged = true
}
csi_plugin {
id = "juicefs-nfs"
type = "controller"
mount_dir = "/csi"
}
resources {
cpu = 100
memory = 512
}
env {
POD_NAME = "csi-controller"
}
}
}
}
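Once this system job is placed, the controller should register as a CSI plugin; a minimal check with the stock Nomad CLI, using the plugin id from the csi_plugin block above:

# Controller allocations should show up as healthy for plugin "juicefs-nfs"
nomad plugin status juicefs-nfs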

View File

@@ -0,0 +1,38 @@
job "juicefs-csi-controller" {
datacenters = ["dc1"]
type = "system"
group "controller" {
task "juicefs-csi-driver" {
driver = "podman"
config {
image = "juicedata/juicefs-csi-driver:v0.14.1"
args = [
"--endpoint=unix://csi/csi.sock",
"--logtostderr",
"--nodeid=${node.unique.id}",
"--v=5"
]
privileged = true
}
env {
POD_NAME = "juicefs-csi-controller"
POD_NAMESPACE = "default"
NODE_NAME = "${node.unique.id}"
}
csi_plugin {
id = "juicefs0"
type = "controller"
mount_dir = "/csi"
}
resources {
cpu = 100
memory = 512
}
}
}
}

View File

@@ -1,55 +0,0 @@
job "nomad-consul-config" {
datacenters = ["dc1"]
type = "system"
group "nomad-server-config" {
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "semaphore|ash1d|ash2e|ch2|ch3|onecloud1|de"
}
task "update-nomad-config" {
driver = "exec"
config {
command = "sh"
args = [
"-c",
"sed -i '/^consul {/,/^}/c\\consul {\\n address = \"master.tailnet-68f9.ts.net:8500,ash3c.tailnet-68f9.ts.net:8500,warden.tailnet-68f9.ts.net:8500\"\\n server_service_name = \"nomad\"\\n client_service_name = \"nomad-client\"\\n auto_advertise = true\\n server_auto_join = true\\n client_auto_join = false\\n}' /etc/nomad.d/nomad.hcl && systemctl restart nomad"
]
}
resources {
cpu = 100
memory = 128
}
}
}
group "nomad-client-config" {
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "master|ash3c|browser|influxdb1|hcp1|warden"
}
task "update-nomad-config" {
driver = "exec"
config {
command = "sh"
args = [
"-c",
"sed -i '/^consul {/,/^}/c\\consul {\\n address = \"master.tailnet-68f9.ts.net:8500,ash3c.tailnet-68f9.ts.net:8500,warden.tailnet-68f9.ts.net:8500\"\\n server_service_name = \"nomad\"\\n client_service_name = \"nomad-client\"\\n auto_advertise = true\\n server_auto_join = false\\n client_auto_join = true\\n}' /etc/nomad.d/nomad.hcl && systemctl restart nomad"
]
}
resources {
cpu = 100
memory = 128
}
}
}
}

View File

@@ -1,23 +0,0 @@
job "nomad-consul-setup" {
datacenters = ["dc1"]
type = "system"
group "nomad-config" {
task "setup-consul" {
driver = "exec"
config {
command = "sh"
args = [
"-c",
"if grep -q 'server.*enabled.*true' /etc/nomad.d/nomad.hcl; then sed -i '/^consul {/,/^}/c\\consul {\\n address = \"master.tailnet-68f9.ts.net:8500,ash3c.tailnet-68f9.ts.net:8500,warden.tailnet-68f9.ts.net:8500\"\\n server_service_name = \"nomad\"\\n client_service_name = \"nomad-client\"\\n auto_advertise = true\\n server_auto_join = true\\n client_auto_join = false\\n}' /etc/nomad.d/nomad.hcl; else sed -i '/^consul {/,/^}/c\\consul {\\n address = \"master.tailnet-68f9.ts.net:8500,ash3c.tailnet-68f9.ts.net:8500,warden.tailnet-68f9.ts.net:8500\"\\n server_service_name = \"nomad\"\\n client_service_name = \"nomad-client\"\\n auto_advertise = true\\n server_auto_join = false\\n client_auto_join = true\\n}' /etc/nomad.d/nomad.hcl; fi && systemctl restart nomad"
]
}
resources {
cpu = 100
memory = 128
}
}
}
}

View File

@@ -1,34 +0,0 @@
job "nfs-volume-example" {
datacenters = ["dc1"]
type = "service"
group "nfs-app" {
count = 1
volume "nfs-shared" {
type = "host"
source = "nfs-shared"
read_only = false
}
task "app" {
driver = "podman"
config {
image = "alpine:latest"
args = ["tail", "-f", "/dev/null"]
}
volume_mount {
volume = "nfs-shared"
destination = "/shared"
read_only = false
}
resources {
cpu = 100
memory = 64
}
}
}
}
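To confirm the host volume really is mounted inside the container, one can exec into the running allocation; the allocation id below is a placeholder:

# <alloc-id> comes from: nomad job status nfs-volume-example
nomad alloc exec -task app <alloc-id> ls -la /shared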

View File

@@ -0,0 +1,43 @@
# NFS CSI Volume Definition for Nomad
# This file defines a CSI volume so the NFS storage shows up in the Nomad UI
volume "nfs-shared-csi" {
type = "csi"
# CSI plugin name
source = "csi-nfs"
# Capacity settings
capacity_min = "1GiB"
capacity_max = "10TiB"
# Access mode - multi-node read/write
access_mode = "multi-node-multi-writer"
# Mount options
mount_options {
fs_type = "nfs4"
mount_flags = "rw,relatime,vers=4.2"
}
# Topology constraint - make sure it lands on nodes that have the NFS mount
topology_request {
required {
topology {
"node" = "{{ range $node := nomadNodes }}{{ if eq $node.Status "ready" }}{{ $node.Name }}{{ end }}{{ end }}"
}
}
}
# Volume parameters
parameters {
server = "snail"
share = "/fs/1000/nfs/Fnsync"
}
}
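On its own this spec is inert; it would normally be created or registered against the csi-nfs plugin before a job can claim it. A rough sketch of that step, assuming the spec is saved as nfs-shared-csi.volume.hcl (the commit does not show the file name or how it is invoked):

# Register the volume with the CSI plugin, then confirm Nomad (and the UI) can see it
nomad volume register nfs-shared-csi.volume.hcl
nomad volume status nfs-shared-csi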

View File

@@ -0,0 +1,22 @@
# Dynamic Host Volume Definition for NFS
# This file defines a dynamic host volume so the NFS storage shows up in the Nomad UI
volume "nfs-shared-dynamic" {
type = "host"
# Use a dynamic host volume
source = "fnsync"
# Read-only setting
read_only = false
# Capacity info (for display only)
capacity_min = "1GiB"
capacity_max = "10TiB"
}

View File

@@ -0,0 +1,22 @@
# NFS Host Volume Definition for Nomad UI
# This file defines a host volume so the NFS storage shows up in the Nomad UI
volume "nfs-shared-host" {
type = "host"
# Use a host volume
source = "fnsync"
# Read-only setting
read_only = false
# Capacity info (for display only)
capacity_min = "1GiB"
capacity_max = "10TiB"
}

View File

@@ -0,0 +1,123 @@
http:
serversTransports:
waypoint-insecure:
insecureSkipVerify: true
authentik-insecure:
insecureSkipVerify: true
middlewares:
consul-stripprefix:
stripPrefix:
prefixes:
- "/consul"
waypoint-auth:
replacePathRegex:
regex: "^/auth/token(.*)$"
replacement: "/auth/token$1"
services:
consul-cluster:
loadBalancer:
servers:
- url: "http://ch4.tailnet-68f9.ts.net:8500" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:8500" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:8500" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
nomad-cluster:
loadBalancer:
servers:
- url: "http://ch2.tailnet-68f9.ts.net:4646" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:4646" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:4646" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
waypoint-cluster:
loadBalancer:
servers:
- url: "https://hcp1.tailnet-68f9.ts.net:9701" # hcp1 节点 HTTPS API
serversTransport: waypoint-insecure
vault-cluster:
loadBalancer:
servers:
- url: "http://warden.tailnet-68f9.ts.net:8200" # 北京,单节点
healthCheck:
path: "/ui/"
interval: "30s"
timeout: "15s"
authentik-cluster:
loadBalancer:
servers:
- url: "https://authentik.tailnet-68f9.ts.net:9443" # Authentik容器HTTPS端口
serversTransport: authentik-insecure
healthCheck:
path: "/flows/-/default/authentication/"
interval: "30s"
timeout: "15s"
routers:
consul-api:
rule: "Host(`consul.git4ta.tech`)"
service: consul-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
middlewares:
- consul-stripprefix
consul-ui:
rule: "Host(`consul.git-4ta.live`) && PathPrefix(`/ui`)"
service: consul-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
nomad-api:
rule: "Host(`nomad.git-4ta.live`)"
service: nomad-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
nomad-ui:
rule: "Host(`nomad.git-4ta.live`) && PathPrefix(`/ui`)"
service: nomad-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
waypoint-ui:
rule: "Host(`waypoint.git-4ta.live`)"
service: waypoint-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
vault-ui:
rule: "Host(`vault.git-4ta.live`)"
service: vault-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
authentik-ui:
rule: "Host(`authentik1.git-4ta.live`)"
service: authentik-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
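With this file loaded by the Traefik job defined later in this commit (dashboard/API exposed insecurely on port 8080 of hcp1), the generated routers and backend health can be inspected over the API; a small sketch:

# List the HTTP routers Traefik built from this dynamic configuration
curl -s http://hcp1.tailnet-68f9.ts.net:8080/api/http/routers | jq -r '.[].name'
# Exercise one routed hostname end to end
curl -sk https://consul.git-4ta.live/v1/status/leader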

View File

@@ -0,0 +1,254 @@
job "traefik-cloudflare-v2" {
datacenters = ["dc1"]
type = "service"
group "traefik" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "="
value = "hcp1"
}
volume "traefik-certs" {
type = "host"
read_only = false
source = "traefik-certs"
}
network {
mode = "host"
port "http" {
static = 80
}
port "https" {
static = 443
}
port "traefik" {
static = 8080
}
}
task "traefik" {
driver = "exec"
config {
command = "/usr/local/bin/traefik"
args = [
"--configfile=/local/traefik.yml"
]
}
env {
CLOUDFLARE_EMAIL = "houzhongxu.houzhongxu@gmail.com"
CLOUDFLARE_DNS_API_TOKEN = "HYT-cfZTP_jq6Xd9g3tpFMwxopOyIrf8LZpmGAI3"
CLOUDFLARE_ZONE_API_TOKEN = "HYT-cfZTP_jq6Xd9g3tpFMwxopOyIrf8LZpmGAI3"
}
volume_mount {
volume = "traefik-certs"
destination = "/opt/traefik/certs"
read_only = false
}
template {
data = <<EOF
api:
dashboard: true
insecure: true
debug: true
entryPoints:
web:
address: "0.0.0.0:80"
http:
redirections:
entrypoint:
to: websecure
scheme: https
permanent: true
websecure:
address: "0.0.0.0:443"
traefik:
address: "0.0.0.0:8080"
providers:
consulCatalog:
endpoint:
address: "warden.tailnet-68f9.ts.net:8500"
scheme: "http"
watch: true
exposedByDefault: false
prefix: "traefik"
defaultRule: "Host(`{{ .Name }}.git-4ta.live`)"
file:
filename: /local/dynamic.yml
watch: true
certificatesResolvers:
cloudflare:
acme:
email: {{ env "CLOUDFLARE_EMAIL" }}
storage: /opt/traefik/certs/acme.json
dnsChallenge:
provider: cloudflare
delayBeforeCheck: 30s
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
log:
level: DEBUG
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
serversTransports:
waypoint-insecure:
insecureSkipVerify: true
authentik-insecure:
insecureSkipVerify: true
middlewares:
consul-stripprefix:
stripPrefix:
prefixes:
- "/consul"
waypoint-auth:
replacePathRegex:
regex: "^/auth/token(.*)$"
replacement: "/auth/token$1"
services:
consul-cluster:
loadBalancer:
servers:
- url: "http://ch4.tailnet-68f9.ts.net:8500" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:8500" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:8500" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
nomad-cluster:
loadBalancer:
servers:
- url: "http://ch2.tailnet-68f9.ts.net:4646" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:4646" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:4646" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
waypoint-cluster:
loadBalancer:
servers:
- url: "https://hcp1.tailnet-68f9.ts.net:9701" # hcp1 节点 HTTPS API
serversTransport: waypoint-insecure
vault-cluster:
loadBalancer:
servers:
- url: "http://warden.tailnet-68f9.ts.net:8200" # 北京,单节点
healthCheck:
path: "/ui/"
interval: "30s"
timeout: "15s"
authentik-cluster:
loadBalancer:
servers:
- url: "https://authentik.tailnet-68f9.ts.net:9443" # Authentik容器HTTPS端口
serversTransport: authentik-insecure
healthCheck:
path: "/flows/-/default/authentication/"
interval: "30s"
timeout: "15s"
routers:
consul-api:
rule: "Host(`consul.git-4ta.live`)"
service: consul-cluster
middlewares:
- consul-stripprefix
entryPoints:
- websecure
tls:
certResolver: cloudflare
traefik-dashboard:
rule: "Host(`traefik.git-4ta.live`)"
service: dashboard@internal
middlewares:
- dashboard_redirect@internal
- dashboard_stripprefix@internal
entryPoints:
- websecure
tls:
certResolver: cloudflare
nomad-ui:
rule: "Host(`nomad.git-4ta.live`)"
service: nomad-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
waypoint-ui:
rule: "Host(`waypoint.git-4ta.live`)"
service: waypoint-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
vault-ui:
rule: "Host(`vault.git-4ta.live`)"
service: vault-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
authentik-ui:
rule: "Host(`authentik.git-4ta.live`)"
service: authentik-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
EOF
destination = "local/dynamic.yml"
}
template {
data = <<EOF
CLOUDFLARE_EMAIL={{ env "CLOUDFLARE_EMAIL" }}
CLOUDFLARE_DNS_API_TOKEN={{ env "CLOUDFLARE_DNS_API_TOKEN" }}
CLOUDFLARE_ZONE_API_TOKEN={{ env "CLOUDFLARE_ZONE_API_TOKEN" }}
EOF
destination = "local/cloudflare.env"
env = true
}
# Test certificate permission control
template {
data = "-----BEGIN CERTIFICATE-----\nTEST CERTIFICATE FOR PERMISSION CONTROL\n-----END CERTIFICATE-----"
destination = "/opt/traefik/certs/test-cert.pem"
perms = 600
}
resources {
cpu = 500
memory = 512
}
}
}
}
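Because the ACME data and the test certificate land in the traefik-certs host volume, a quick look on hcp1 (the node this group is pinned to) shows whether the permission-control template behaved as intended; a sketch, assuming the host paths match the volume_mount above:

# acme.json should stay private; test-cert.pem was rendered with perms 600
ls -l /opt/traefik/certs/acme.json /opt/traefik/certs/test-cert.pem
stat -c '%a %n' /opt/traefik/certs/test-cert.pem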

View File

@@ -0,0 +1,239 @@
job "traefik-cloudflare-v2" {
datacenters = ["dc1"]
type = "service"
group "traefik" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "hcp1"
}
volume "traefik-certs" {
type = "host"
read_only = false
source = "traefik-certs"
}
network {
mode = "host"
port "http" {
static = 80
}
port "https" {
static = 443
}
port "traefik" {
static = 8080
}
}
task "traefik" {
driver = "exec"
config {
command = "/usr/local/bin/traefik"
args = [
"--configfile=/local/traefik.yml"
]
}
volume_mount {
volume = "traefik-certs"
destination = "/opt/traefik/certs"
read_only = false
}
template {
data = <<EOF
api:
dashboard: true
insecure: true
entryPoints:
web:
address: "0.0.0.0:80"
http:
redirections:
entrypoint:
to: websecure
scheme: https
permanent: true
websecure:
address: "0.0.0.0:443"
traefik:
address: "0.0.0.0:8080"
providers:
consulCatalog:
endpoint:
address: "warden.tailnet-68f9.ts.net:8500"
scheme: "http"
watch: true
exposedByDefault: false
prefix: "traefik"
defaultRule: "Host(`{{ .Name }}.git-4ta.live`)"
file:
filename: /local/dynamic.yml
watch: true
certificatesResolvers:
cloudflare:
acme:
email: houzhongxu.houzhongxu@gmail.com
storage: /opt/traefik/certs/acme.json
dnsChallenge:
provider: cloudflare
delayBeforeCheck: 30s
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
log:
level: DEBUG
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
serversTransports:
waypoint-insecure:
insecureSkipVerify: true
authentik-insecure:
insecureSkipVerify: true
middlewares:
consul-stripprefix:
stripPrefix:
prefixes:
- "/consul"
waypoint-auth:
replacePathRegex:
regex: "^/auth/token(.*)$"
replacement: "/auth/token$1"
services:
consul-cluster:
loadBalancer:
servers:
- url: "http://ch4.tailnet-68f9.ts.net:8500" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:8500" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:8500" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
nomad-cluster:
loadBalancer:
servers:
- url: "http://ch2.tailnet-68f9.ts.net:4646" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:4646" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:4646" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
waypoint-cluster:
loadBalancer:
servers:
- url: "https://hcp1.tailnet-68f9.ts.net:9701" # hcp1 节点 HTTPS API
serversTransport: waypoint-insecure
vault-cluster:
loadBalancer:
servers:
- url: "http://warden.tailnet-68f9.ts.net:8200" # 北京,单节点
healthCheck:
path: "/ui/"
interval: "30s"
timeout: "15s"
authentik-cluster:
loadBalancer:
servers:
- url: "https://authentik.tailnet-68f9.ts.net:9443" # Authentik容器HTTPS端口
serversTransport: authentik-insecure
healthCheck:
path: "/flows/-/default/authentication/"
interval: "30s"
timeout: "15s"
routers:
consul-api:
rule: "Host(`consul.git-4ta.live`)"
service: consul-cluster
middlewares:
- consul-stripprefix
entryPoints:
- websecure
tls:
certResolver: cloudflare
traefik-dashboard:
rule: "Host(`traefik.git-4ta.live`)"
service: dashboard@internal
middlewares:
- dashboard_redirect@internal
- dashboard_stripprefix@internal
entryPoints:
- websecure
tls:
certResolver: cloudflare
nomad-ui:
rule: "Host(`nomad.git-4ta.live`)"
service: nomad-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
waypoint-ui:
rule: "Host(`waypoint.git-4ta.live`)"
service: waypoint-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
vault-ui:
rule: "Host(`vault.git-4ta.live`)"
service: vault-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
authentik-ui:
rule: "Host(`authentik.git4ta.tech`)"
service: authentik-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
EOF
destination = "local/dynamic.yml"
}
template {
data = <<EOF
CLOUDFLARE_EMAIL=houzhongxu.houzhongxu@gmail.com
CLOUDFLARE_DNS_API_TOKEN=0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr
CLOUDFLARE_ZONE_API_TOKEN=0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr
EOF
destination = "local/cloudflare.env"
env = true
}
resources {
cpu = 500
memory = 512
}
}
}
}

View File

@@ -0,0 +1,249 @@
job "traefik-cloudflare-v3" {
datacenters = ["dc1"]
type = "service"
group "traefik" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "hcp1"
}
volume "traefik-certs" {
type = "host"
read_only = false
source = "traefik-certs"
}
network {
mode = "host"
port "http" {
static = 80
}
port "https" {
static = 443
}
port "traefik" {
static = 8080
}
}
task "traefik" {
driver = "exec"
config {
command = "/usr/local/bin/traefik"
args = [
"--configfile=/local/traefik.yml"
]
}
env {
CLOUDFLARE_EMAIL = "locksmithknight@gmail.com"
CLOUDFLARE_DNS_API_TOKEN = "0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr"
CLOUDFLARE_ZONE_API_TOKEN = "0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr"
}
volume_mount {
volume = "traefik-certs"
destination = "/opt/traefik/certs"
read_only = false
}
template {
data = <<EOF
api:
dashboard: true
insecure: true
entryPoints:
web:
address: "0.0.0.0:80"
http:
redirections:
entrypoint:
to: websecure
scheme: https
permanent: true
websecure:
address: "0.0.0.0:443"
traefik:
address: "0.0.0.0:8080"
providers:
consulCatalog:
endpoint:
address: "warden.tailnet-68f9.ts.net:8500"
scheme: "http"
watch: true
exposedByDefault: false
prefix: "traefik"
defaultRule: "Host(`{{ .Name }}.git-4ta.live`)"
file:
filename: /local/dynamic.yml
watch: true
certificatesResolvers:
cloudflare:
acme:
email: {{ env "CLOUDFLARE_EMAIL" }}
storage: /opt/traefik/certs/acme.json
dnsChallenge:
provider: cloudflare
delayBeforeCheck: 30s
log:
level: DEBUG
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
serversTransports:
waypoint-insecure:
insecureSkipVerify: true
authentik-insecure:
insecureSkipVerify: true
middlewares:
consul-stripprefix:
stripPrefix:
prefixes:
- "/consul"
waypoint-auth:
replacePathRegex:
regex: "^/auth/token(.*)$"
replacement: "/auth/token$1"
services:
consul-cluster:
loadBalancer:
servers:
- url: "http://ch4.tailnet-68f9.ts.net:8500" # 韩国Leader
- url: "http://warden.tailnet-68f9.ts.net:8500" # 北京Follower
- url: "http://ash3c.tailnet-68f9.ts.net:8500" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
nomad-cluster:
loadBalancer:
servers:
- url: "http://ch2.tailnet-68f9.ts.net:4646" # 韩国Leader
- url: "http://ash3c.tailnet-68f9.ts.net:4646" # 美国Follower
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
waypoint-cluster:
loadBalancer:
servers:
- url: "https://hcp1.tailnet-68f9.ts.net:9701" # hcp1 节点 HTTPS API
serversTransport: waypoint-insecure
vault-cluster:
loadBalancer:
servers:
- url: "http://warden.tailnet-68f9.ts.net:8200" # 北京,单节点
healthCheck:
path: "/ui/"
interval: "30s"
timeout: "15s"
authentik-cluster:
loadBalancer:
servers:
- url: "https://authentik.tailnet-68f9.ts.net:9443" # Authentik容器HTTPS端口
serversTransport: authentik-insecure
healthCheck:
path: "/flows/-/default/authentication/"
interval: "30s"
timeout: "15s"
routers:
consul-api:
rule: "Host(`consul.git-4ta.live`)"
service: consul-cluster
middlewares:
- consul-stripprefix
entryPoints:
- websecure
tls:
certResolver: cloudflare
traefik-dashboard:
rule: "Host(`traefik.git-4ta.live`)"
service: dashboard@internal
middlewares:
- dashboard_redirect@internal
- dashboard_stripprefix@internal
entryPoints:
- websecure
tls:
certResolver: cloudflare
traefik-api:
rule: "Host(`traefik.git-4ta.live`) && PathPrefix(`/api`)"
service: api@internal
entryPoints:
- websecure
tls:
certResolver: cloudflare
nomad-ui:
rule: "Host(`nomad.git-4ta.live`)"
service: nomad-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
waypoint-ui:
rule: "Host(`waypoint.git-4ta.live`)"
service: waypoint-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
vault-ui:
rule: "Host(`vault.git-4ta.live`)"
service: vault-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
authentik-ui:
rule: "Host(`authentik1.git-4ta.live`)"
service: authentik-cluster
entryPoints:
- websecure
tls:
certResolver: cloudflare
EOF
destination = "local/dynamic.yml"
}
template {
data = <<EOF
CLOUDFLARE_EMAIL=locksmithknight@gmail.com
CLOUDFLARE_DNS_API_TOKEN=0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr
CLOUDFLARE_ZONE_API_TOKEN=0aPWoLaQ59l0nyL1jIVzZaEx2e41Gjgcfhn3ztJr
EOF
destination = "local/cloudflare.env"
env = true
}
resources {
cpu = 500
memory = 512
}
}
}
}

View File

@@ -1,97 +0,0 @@
job "traefik-consul-lb" {
datacenters = ["dc1"]
type = "service"
group "traefik" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "hcp1"
}
update {
min_healthy_time = "5s"
healthy_deadline = "10m"
progress_deadline = "15m"
auto_revert = false
}
network {
mode = "host"
port "http" {
static = 80
host_network = "tailscale0"
}
port "traefik" {
static = 8080
host_network = "tailscale0"
}
}
task "traefik" {
driver = "exec"
config {
command = "/usr/local/bin/traefik"
args = [
"--configfile=/local/traefik.yml"
]
}
template {
data = <<EOF
api:
dashboard: true
insecure: true
entryPoints:
web:
address: "100.97.62.111:80"
traefik:
address: "100.97.62.111:8080"
providers:
file:
filename: /local/dynamic.yml
watch: true
log:
level: INFO
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
services:
consul-cluster:
loadBalancer:
servers:
- url: "http://warden.tailnet-68f9.ts.net:8500" # 北京,优先
- url: "http://master.tailnet-68f9.ts.net:8500" # 备用
- url: "http://ash3c.tailnet-68f9.ts.net:8500" # 备用
healthCheck:
path: "/v1/status/leader"
interval: "30s"
timeout: "15s"
routers:
consul-api:
rule: "PathPrefix(`/`)"
service: consul-cluster
entryPoints:
- web
EOF
destination = "local/dynamic.yml"
}
resources {
cpu = 500
memory = 512
}
}
}
}

View File

@@ -1,283 +0,0 @@
job "vault-cluster-exec" {
datacenters = ["dc1"]
type = "service"
group "vault-master" {
count = 1
# Use an existing attribute instead of a consul version check
constraint {
attribute = "${driver.exec}"
operator = "="
value = "1"
}
constraint {
attribute = "${node.unique.name}"
value = "kr-master"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "{{ with nomadService "consul" }}{{ range . }}{{ if contains .Tags "http" }}{{ .Address }}:{{ .Port }}{{ end }}{{ end }}{{ end }}"
path = "vault/"
# Consul service discovery configuration
service {
name = "vault"
tags = ["vault"]
}
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # TLS should be enabled in production
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
# Additional settings to work around permission issues
disable_sealwrap = true
disable_cache = false
# Enable raw logging
enable_raw_log = true
# Integrate with Nomad service discovery
service_registration {
enabled = true
}
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 100
memory = 256
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
group "vault-ash3c" {
count = 1
# Drop the consul version constraint and use a driver constraint instead
constraint {
attribute = "${driver.exec}"
operator = "="
value = "1"
}
constraint {
attribute = "${node.unique.name}"
value = "us-ash3c"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "{{ with nomadService "consul" }}{{ range . }}{{ if contains .Tags "http" }}{{ .Address }}:{{ .Port }}{{ end }}{{ end }}{{ end }}"
path = "vault/"
# Consul service discovery configuration
service {
name = "vault"
tags = ["vault"]
}
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # TLS should be enabled in production
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
# Additional settings to work around permission issues
disable_sealwrap = true
disable_cache = false
# Enable raw logging
enable_raw_log = true
# Integrate with Nomad service discovery
service_registration {
enabled = true
}
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 100
memory = 256
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
group "vault-warden" {
count = 1
# Drop the consul version constraint and use a driver constraint instead
constraint {
attribute = "${driver.exec}"
operator = "="
value = "1"
}
constraint {
attribute = "${node.unique.name}"
value = "bj-warden"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "{{ with nomadService "consul" }}{{ range . }}{{ if contains .Tags "http" }}{{ .Address }}:{{ .Port }}{{ end }}{{ end }}{{ end }}"
path = "vault/"
# Consul service discovery configuration
service {
name = "vault"
tags = ["vault"]
}
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # TLS should be enabled in production
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
# Additional settings to work around permission issues
disable_sealwrap = true
disable_cache = false
# Enable raw logging
enable_raw_log = true
# Integrate with Nomad service discovery
service_registration {
enabled = true
}
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 100
memory = 256
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1,94 +0,0 @@
job "vault-cluster" {
datacenters = ["dc1"]
type = "service"
group "vault-servers" {
count = 3
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(warden|ash3c|master)"
}
task "vault" {
driver = "podman"
config {
image = "hashicorp/vault:latest"
ports = ["api", "cluster"]
# Make sure the container does not auto-restart on exit
command = "vault"
args = [
"server",
"-config=/vault/config/vault.hcl"
]
# Container network settings
network_mode = "host"
# Security settings
cap_add = ["IPC_LOCK"]
}
template {
data = <<EOH
storage "consul" {
address = "localhost:8500"
path = "vault/"
token = "{{ with secret "consul/creds/vault" }}{{ .Data.token }}{{ end }}"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # TLS should be enabled in production
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
EOH
destination = "vault/config/vault.hcl"
}
volume_mount {
volume = "vault-data"
destination = "/vault/data"
read_only = false
}
resources {
cpu = 500
memory = 1024
network {
mbits = 10
port "api" { static = 8200 }
port "cluster" { static = 8201 }
}
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
volume "vault-data" {
type = "host"
read_only = false
source = "vault-data"
}
}
}

View File

@@ -1,65 +0,0 @@
job "vault-dev-warden" {
datacenters = ["dc1"]
type = "service"
group "vault-dev" {
count = 1
# Constrain to nodes that have consul
constraint {
attribute = "${meta.consul}"
operator = "="
value = "true"
}
network {
port "http" {
to = 8200
}
port "cluster" {
to = 8201
}
}
service {
name = "vault-dev"
port = "http"
check {
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "5s"
}
}
task "vault-dev" {
driver = "raw_exec"
config {
command = "vault"
args = [
"server",
"-dev",
"-dev-listen-address=0.0.0.0:8200",
"-dev-root-token-id=root"
]
}
env {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = "root"
}
resources {
cpu = 500
memory = 512
}
logs {
max_files = 10
max_file_size = 10
}
}
}
}
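While this dev job was in place, the server could be smoke-tested directly with the root token hard-coded in its env block; a sketch of that check:

# Use the dev listener and root token from the env block above
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=root
vault status
vault kv put secret/smoke test=ok && vault kv get secret/smoke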