Clean up repository: remove backup files and reorganize infrastructure components

2025-10-02 17:04:51 +00:00
parent e5aa00d6f9
commit 1c994f9f60
133 changed files with 1835 additions and 11296 deletions

View File

@@ -1,60 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "us-ash3c"
bind_addr = "100.116.80.94"
addresses {
http = "100.116.80.94"
rpc = "100.116.80.94"
serf = "100.116.80.94"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = false
}
client {
enabled = true
network_interface = "tailscale0"
# Server addresses for the "seven sisters" cluster
servers = [
"100.116.158.95:4647", # bj-semaphore
"100.81.26.3:4647", # ash1d
"100.103.147.94:4647", # ash2e
"100.90.159.68:4647", # ch2
"100.86.141.112:4647", # ch3
"100.98.209.50:4647", # bj-onecloud1
"100.120.225.29:4647" # de
]
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,56 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "kr-master"
bind_addr = "100.117.106.136"
addresses {
http = "100.117.106.136"
rpc = "100.117.106.136"
serf = "100.117.106.136"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = false
}
client {
enabled = true
network_interface = "tailscale0"
servers = [
"100.116.158.95:4647", # semaphore
"100.103.147.94:4647", # ash2e
"100.81.26.3:4647", # ash1d
"100.90.159.68:4647" # ch2
]
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,56 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "bj-warden"
bind_addr = "100.122.197.112"
addresses {
http = "100.122.197.112"
rpc = "100.122.197.112"
serf = "100.122.197.112"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = false
}
client {
enabled = true
network_interface = "tailscale0"
servers = [
"100.116.158.95:4647", # semaphore
"100.103.147.94:4647", # ash2e
"100.81.26.3:4647", # ash1d
"100.90.159.68:4647" # ch2
]
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,51 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "us-ash1d"
bind_addr = "100.81.26.3"
addresses {
http = "100.81.26.3"
rpc = "100.81.26.3"
serf = "100.81.26.3"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
retry_join = ["us-ash1d", "ash2e", "ch2", "ch3", "onecloud1", "de"]
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,51 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "us-ash2e"
bind_addr = "100.103.147.94"
addresses {
http = "100.103.147.94"
rpc = "100.103.147.94"
serf = "100.103.147.94"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
retry_join = ["us-ash2e", "ash1d", "ch2", "ch3", "onecloud1", "de"]
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,51 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "kr-ch2"
bind_addr = "100.90.159.68"
addresses {
http = "100.90.159.68"
rpc = "100.90.159.68"
serf = "100.90.159.68"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
retry_join = ["kr-ch2", "ash1d", "ash2e", "ch3", "onecloud1", "de"]
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul { # three nodes
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault { # three nodes
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,51 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "kr-ch3"
bind_addr = "100.86.141.112"
addresses {
http = "100.86.141.112"
rpc = "100.86.141.112"
serf = "100.86.141.112"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
data_dir = "/opt/nomad/data"
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul { # three nodes
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault { # three nodes
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,50 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "de"
bind_addr = "100.120.225.29"
addresses {
http = "100.120.225.29"
rpc = "100.120.225.29"
serf = "100.120.225.29"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul { # three nodes
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault { # three nodes
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,50 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "onecloud1"
bind_addr = "100.98.209.50"
addresses {
http = "100.98.209.50"
rpc = "100.98.209.50"
serf = "100.98.209.50"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1,51 +0,0 @@
datacenter = "dc1"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
log_level = "INFO"
name = "semaphore"
bind_addr = "100.116.158.95"
addresses {
http = "100.116.158.95"
rpc = "100.116.158.95"
serf = "100.116.158.95"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
server {
enabled = true
bootstrap_expect = 3
}
client {
enabled = false
}
plugin "nomad-driver-podman" {
config {
socket_path = "unix:///run/podman/podman.sock"
volumes {
enabled = true
}
}
}
consul {
address = "100.117.106.136:8500,100.116.80.94:8500,100.122.197.112:8500" # master, ash3c, warden
}
vault {
enabled = true
address = "http://100.117.106.136:8200,http://100.116.80.94:8200,http://100.122.197.112:8200" # master, ash3c, warden
token = "hvs.A5Fu4E1oHyezJapVllKPFsWg"
create_from_role = "nomad-cluster"
tls_skip_verify = true
}

View File

@@ -1 +0,0 @@
components/consul/jobs/

View File

@@ -1,37 +0,0 @@
# DigitalOcean key storage job
job "digitalocean-key-store" {
datacenters = ["dc1"]
type = "batch"
group "key-store" {
task "store-key" {
driver = "exec"
config {
command = "/bin/sh"
args = [
"-c",
<<EOT
# Store the DigitalOcean key in Consul
curl -X PUT -H "X-Consul-Token: ${CONSUL_HTTP_TOKEN}" \
http://127.0.0.1:8500/v1/kv/council/digitalocean/token \
-d 'dop_v1_70582bb508873709d96debc7f2a2d04df2093144b2b15fe392dba83b88976376'
# Verify that the key was stored successfully
curl -s http://127.0.0.1:8500/v1/kv/council/digitalocean/token?raw
EOT
]
}
env {
CONSUL_HTTP_ADDR = "http://127.0.0.1:8500"
CONSUL_HTTP_TOKEN = "root" # 根据实际Consul配置调整
}
resources {
cpu = 100
memory = 64
}
}
}
}

View File

@@ -1,65 +0,0 @@
job "hybrid-nfs-app" {
datacenters = ["dc1"]
type = "service"
# Use a constraint to distinguish storage types
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "semaphore"
}
group "app" {
count = 1
network {
port "http" {
static = 8080
}
}
# Use a host volume on the local semaphore node
volume "local-storage" {
type = "host"
read_only = false
source = "local-fnsync"
}
task "web-app" {
driver = "exec"
config {
command = "python3"
args = ["-m", "http.server", "8080", "--directory", "local/fnsync"]
}
template {
data = <<EOH
<h1>Hybrid NFS App - Running on {{ env "attr.unique.hostname" }}</h1>
<p>Storage Type: {{ with eq (env "attr.unique.hostname") "semaphore" }}PVE Mount{{ else }}NFS{{ end }}</p>
<p>Timestamp: {{ now | date "2006-01-02 15:04:05" }}</p>
EOH
destination = "local/fnsync/index.html"
}
resources {
cpu = 100
memory = 128
}
service {
name = "hybrid-nfs-app"
port = "http"
tags = ["hybrid", "nfs", "web"]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1,51 +0,0 @@
job "nfs-app-example" {
datacenters = ["dc1"]
type = "service"
group "app" {
count = 1
# Use the NFS storage volume
volume "nfs-storage" {
type = "host"
read_only = false
source = "nfs-fnsync"
}
task "web-app" {
driver = "docker"
config {
image = "nginx:alpine"
ports = ["http"]
# Mount the NFS volume into the container
mount {
type = "volume"
target = "/usr/share/nginx/html"
source = "nfs-storage"
readonly = false
}
}
resources {
cpu = 100
memory = 128
}
service {
name = "nfs-web-app"
port = "http"
tags = ["nfs", "web"]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1,34 +0,0 @@
job "nfs-storage-test" {
datacenters = ["dc1"]
type = "batch"
group "test" {
count = 1
volume "nfs-storage" {
type = "csi"
read_only = false
source = "nfs-fnsync"
}
task "storage-test" {
driver = "exec"
volume_mount {
volume = "nfs-storage"
destination = "/mnt/nfs"
read_only = false
}
config {
command = "/bin/sh"
args = ["-c", "echo 'NFS Storage Test - $(hostname) - $(date)' > /mnt/nfs/test-$(hostname).txt && ls -la /mnt/nfs/"]
}
resources {
cpu = 50
memory = 64
}
}
}
}

View File

@@ -1 +0,0 @@
components/nomad/jobs/

View File

@@ -1,84 +0,0 @@
job "nfs-multi-type-example" {
datacenters = ["dc1"]
type = "service"
# Task group for local LXC containers
group "lxc-apps" {
count = 2
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "(influxdb|hcp)"
}
volume "lxc-nfs" {
type = "host"
source = "nfs-shared"
read_only = false
}
task "lxc-app" {
driver = "podman"
config {
image = "alpine:latest"
args = ["tail", "-f", "/dev/null"]
}
volume_mount {
volume = "lxc-nfs"
destination = "/shared/lxc"
read_only = false
}
resources {
cpu = 100
memory = 64
}
}
}
# Task group for overseas PVE containers
group "pve-apps" {
count = 3
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "(ash1d|ash2e|ash3c|ch2|ch3)"
}
volume "pve-nfs" {
type = "host"
source = "nfs-shared"
read_only = false
}
task "pve-app" {
driver = "podman"
config {
image = "alpine:latest"
args = ["tail", "-f", "/dev/null"]
# Network optimization settings for overseas nodes
network_mode = "host"
}
volume_mount {
volume = "pve-nfs"
destination = "/shared/pve"
read_only = false
}
resources {
cpu = 100
memory = 64
network {
mbits = 5
}
}
}
}
}

View File

@@ -1,86 +0,0 @@
job "openfaas-functions" {
datacenters = ["dc1"]
type = "service"
group "hello-world" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "hello-world" {
driver = "podman"
config {
image = "functions/hello-world:latest"
ports = ["http"]
env = {
"fprocess" = "node index.js"
}
}
resources {
network {
mbits = 10
port "http" { static = 8080 }
}
}
service {
name = "hello-world"
port = "http"
tags = ["openfaas-function"]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
}
}
group "figlet" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "figlet" {
driver = "podman"
config {
image = "functions/figlet:latest"
ports = ["http"]
env = {
"fprocess" = "figlet"
}
}
resources {
network {
mbits = 10
port "http" { static = 8080 }
}
}
service {
name = "figlet"
port = "http"
tags = ["openfaas-function"]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1,176 +0,0 @@
job "openfaas" {
datacenters = ["dc1"]
type = "service"
group "openfaas-gateway" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "openfaas-gateway" {
driver = "podman"
config {
image = "ghcr.io/openfaas/gateway:0.2.35"
ports = ["http", "ui"]
env = {
"functions_provider_url" = "http://${NOMAD_IP_http}:8080"
"read_timeout" = "60s"
"write_timeout" = "60s"
"upstream_timeout" = "60s"
"direct_functions" = "true"
"faas_nats_address" = "nats://localhost:4222"
"faas_nats_streaming" = "true"
"basic_auth" = "true"
"secret_mount_path" = "/run/secrets"
"scale_from_zero" = "true"
}
}
resources {
network {
mbits = 10
port "http" { static = 8080 }
port "ui" { static = 8081 }
}
}
service {
name = "openfaas-gateway"
port = "http"
check {
type = "http"
path = "/healthz"
interval = "10s"
timeout = "2s"
}
}
}
}
group "nats" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "nats" {
driver = "podman"
config {
image = "nats-streaming:0.25.3"
ports = ["nats"]
args = [
"-p",
"4222",
"-m",
"8222",
"-hbi",
"5s",
"-hbt",
"5s",
"-hbf",
"2",
"-SD",
"-cid",
"openfaas"
]
}
resources {
network {
mbits = 10
port "nats" { static = 4222 }
}
}
service {
name = "nats"
port = "nats"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
}
group "queue-worker" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "queue-worker" {
driver = "podman"
config {
image = "ghcr.io/openfaas/queue-worker:0.12.2"
env = {
"gateway_url" = "http://${NOMAD_IP_http}:8080"
"faas_nats_address" = "nats://localhost:4222"
"faas_nats_streaming" = "true"
"ack_wait" = "5m"
"write_debug" = "true"
}
}
resources {
network {
mbits = 10
}
}
}
}
group "prometheus" {
count = 1
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "prometheus" {
driver = "podman"
config {
image = "prom/prometheus:v2.35.0"
ports = ["prometheus"]
volumes = [
"/opt/openfaas/prometheus.yml:/etc/prometheus/prometheus.yml"
]
}
resources {
network {
mbits = 10
port "prometheus" { static = 9090 }
}
}
service {
name = "prometheus"
port = "prometheus"
check {
type = "http"
path = "/-/healthy"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1,130 +0,0 @@
job "traefik" {
datacenters = ["dc1"]
type = "service"
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "3m"
auto_revert = true
}
group "traefik" {
count = 1 # deploy a single instance on the warden node first
# Constrain to run only on the warden node
constraint {
attribute = "${node.unique.name}"
operator = "="
value = "bj-warden"
}
restart {
attempts = 3
interval = "30m"
delay = "15s"
mode = "fail"
}
network {
port "http" {
static = 80
}
port "https" {
static = 443
}
port "api" {
static = 8080
}
}
task "traefik" {
driver = "exec"
# Download the Traefik v3 binary
artifact {
source = "https://github.com/traefik/traefik/releases/download/v3.1.5/traefik_v3.1.5_linux_amd64.tar.gz"
destination = "local/"
mode = "file"
options {
archive = "true"
}
}
# Dynamic configuration template
template {
data = <<EOF
# Traefik dynamic configuration - services discovered from Consul
http:
routers:
consul-master:
rule: "Host(`consul-master.service.consul`)"
service: consul-master
entryPoints: ["http"]
services:
consul-master:
loadBalancer:
servers:
{{ range nomadService "consul" }}
{{ if contains .Tags "http" }}
- url: "http://{{ .Address }}:{{ .Port }}"
{{ end }}
{{ end }}
# Consul Catalog configuration
providers:
consulCatalog:
exposedByDefault: false
prefix: "traefik"
refreshInterval: 15s
endpoint:
address: "http://{{ with nomadService "consul" }}{{ range . }}{{ if contains .Tags "http" }}{{ .Address }}:{{ .Port }}{{ end }}{{ end }}{{ end }}"
connectAware: true
connectByDefault: false
EOF
destination = "local/dynamic.yml"
change_mode = "restart"
}
config {
command = "local/traefik"
args = [
"--configfile=/root/mgmt/infrastructure/routes/traefik.yml",
"--providers.file.filename=local/dynamic.yml",
"--providers.file.watch=true"
]
}
env {
NOMAD_ADDR = "http://${attr.unique.network.ip-address}:4646"
# The Consul address is resolved dynamically via the template
}
resources {
cpu = 200
memory = 256
}
service {
name = "traefik-warden"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik-warden.rule=Host(`traefik.warden.consul`)",
"traefik.http.routers.traefik-warden.service=api@internal",
"traefik.http.routers.traefik-warden.entrypoints=api",
"traefik.http.services.traefik-warden.loadbalancer.server.port=8080",
"warden"
]
check {
type = "http"
path = "/ping"
interval = "10s"
timeout = "2s"
}
}
}
}
}

View File

@@ -1 +0,0 @@
components/vault/jobs/

View File

@@ -1,228 +0,0 @@
#!/bin/bash
# Nomad multi-datacenter node auto-configuration script
# Datacenter: ${datacenter}
set -e
# Logging helper
log() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a /var/log/nomad-setup.log
}
log "Starting Nomad node setup - datacenter: ${datacenter}"
# Update the system
log "Updating system packages..."
apt-get update -y
apt-get upgrade -y
# Install required packages
log "Installing required packages..."
apt-get install -y \
curl \
wget \
unzip \
jq \
podman \
htop \
net-tools \
vim
# Start Podman
log "Starting the Podman service..."
systemctl enable podman
systemctl start podman
usermod -aG podman ubuntu
# Install Nomad
log "Installing Nomad ${nomad_version}..."
cd /tmp
wget -q https://releases.hashicorp.com/nomad/${nomad_version}/nomad_${nomad_version}_linux_amd64.zip
unzip nomad_${nomad_version}_linux_amd64.zip
mv nomad /usr/local/bin/
chmod +x /usr/local/bin/nomad
# Create the Nomad user and directories
log "Creating the Nomad user and directories..."
useradd --system --home /etc/nomad.d --shell /bin/false nomad
mkdir -p /opt/nomad/data
mkdir -p /etc/nomad.d
mkdir -p /var/log/nomad
chown -R nomad:nomad /opt/nomad /etc/nomad.d /var/log/nomad
# Determine the local IP address
if [ "${bind_addr}" = "auto" ]; then
# Try several methods to detect the IP
BIND_ADDR=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null || \
curl -s http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip -H "Metadata-Flavor: Google" 2>/dev/null || \
ip route get 8.8.8.8 | awk '{print $7; exit}' || \
hostname -I | awk '{print $1}')
else
BIND_ADDR="${bind_addr}"
fi
log "检测到 IP 地址: $BIND_ADDR"
# 创建 Nomad 配置文件
log "创建 Nomad 配置文件..."
cat > /etc/nomad.d/nomad.hcl << EOF
datacenter = "${datacenter}"
region = "dc1"
data_dir = "/opt/nomad/data"
bind_addr = "$BIND_ADDR"
%{ if server_enabled }
server {
enabled = true
bootstrap_expect = ${bootstrap_expect}
encrypt = "${nomad_encrypt_key}"
}
%{ endif }
%{ if client_enabled }
client {
enabled = true
host_volume "podman-sock" {
path = "/run/podman/podman.sock"
read_only = false
}
}
%{ endif }
ui {
enabled = true
}
addresses {
http = "0.0.0.0"
rpc = "$BIND_ADDR"
serf = "$BIND_ADDR"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
plugin "podman" {
config {
volumes {
enabled = true
}
}
}
telemetry {
collection_interval = "10s"
disable_hostname = false
prometheus_metrics = true
publish_allocation_metrics = true
publish_node_metrics = true
}
log_level = "INFO"
log_file = "/var/log/nomad/nomad.log"
EOF
# Create the systemd unit file
log "Creating the systemd unit file..."
cat > /etc/systemd/system/nomad.service << EOF
[Unit]
Description=Nomad
Documentation=https://www.nomadproject.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/nomad.d/nomad.hcl
[Service]
Type=notify
User=nomad
Group=nomad
ExecStart=/usr/local/bin/nomad agent -config=/etc/nomad.d/nomad.hcl
ExecReload=/bin/kill -HUP \$MAINPID
KillMode=process
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Start the Nomad service
log "Starting the Nomad service..."
systemctl daemon-reload
systemctl enable nomad
systemctl start nomad
# Wait for the service to start
log "Waiting for the Nomad service to start..."
sleep 10
# Verify the installation
log "Verifying the Nomad installation..."
if systemctl is-active --quiet nomad; then
log "✅ Nomad service is running"
log "📊 Node information:"
/usr/local/bin/nomad node status -self || true
else
log "❌ Nomad 服务启动失败"
systemctl status nomad --no-pager || true
journalctl -u nomad --no-pager -n 20 || true
fi
# Configure the firewall (if needed)
log "Configuring firewall rules..."
if command -v ufw >/dev/null 2>&1; then
ufw allow 4646/tcp # HTTP API
ufw allow 4647/tcp # RPC
ufw allow 4648/tcp # Serf
ufw allow 22/tcp # SSH
fi
# Create helpful aliases and scripts
log "Creating management scripts..."
cat > /usr/local/bin/nomad-status << 'EOF'
#!/bin/bash
echo "=== Nomad 服务状态 ==="
systemctl status nomad --no-pager
echo -e "\n=== Nomad 集群成员 ==="
nomad server members 2>/dev/null || echo "无法连接到集群"
echo -e "\n=== Nomad 节点状态 ==="
nomad node status 2>/dev/null || echo "无法获取节点状态"
echo -e "\n=== 最近日志 ==="
journalctl -u nomad --no-pager -n 5
EOF
chmod +x /usr/local/bin/nomad-status
# Add aliases to the ubuntu user's bashrc
echo 'alias ns="nomad-status"' >> /home/ubuntu/.bashrc
echo 'alias nomad-logs="journalctl -u nomad -f"' >> /home/ubuntu/.bashrc
log "🎉 Nomad 节点配置完成!"
log "📍 数据中心: ${datacenter}"
log "🌐 IP 地址: $BIND_ADDR"
log "🔗 Web UI: http://$BIND_ADDR:4646"
log "📝 使用 'nomad-status' 或 'ns' 命令查看状态"
# 输出重要信息到 motd
cat > /etc/update-motd.d/99-nomad << EOF
#!/bin/bash
echo ""
echo "🚀 Nomad 节点信息:"
echo " 数据中心: ${datacenter}"
echo " IP 地址: $BIND_ADDR"
echo " Web UI: http://$BIND_ADDR:4646"
echo " 状态检查: nomad-status"
echo ""
EOF
chmod +x /etc/update-motd.d/99-nomad
log "节点配置脚本执行完成"

View File

@@ -1,228 +0,0 @@
#!/bin/bash
# Nomad multi-datacenter node auto-configuration script
# Datacenter: ${datacenter}
set -e
# Logging helper
log() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a /var/log/nomad-setup.log
}
log "Starting Nomad node setup - datacenter: ${datacenter}"
# Update the system
log "Updating system packages..."
apt-get update -y
apt-get upgrade -y
# Install required packages
log "Installing required packages..."
apt-get install -y \
curl \
wget \
unzip \
jq \
podman \
htop \
net-tools \
vim
# Start Podman
log "Starting the Podman service..."
systemctl enable podman
systemctl start podman
usermod -aG podman ubuntu
# Install Nomad
log "Installing Nomad ${nomad_version}..."
cd /tmp
wget -q https://releases.hashicorp.com/nomad/${nomad_version}/nomad_${nomad_version}_linux_amd64.zip
unzip nomad_${nomad_version}_linux_amd64.zip
mv nomad /usr/local/bin/
chmod +x /usr/local/bin/nomad
# Create the Nomad user and directories
log "Creating the Nomad user and directories..."
useradd --system --home /etc/nomad.d --shell /bin/false nomad
mkdir -p /opt/nomad/data
mkdir -p /etc/nomad.d
mkdir -p /var/log/nomad
chown -R nomad:nomad /opt/nomad /etc/nomad.d /var/log/nomad
# Determine the local IP address
if [ "${bind_addr}" = "auto" ]; then
# Try several methods to detect the IP
BIND_ADDR=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null || \
curl -s http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip -H "Metadata-Flavor: Google" 2>/dev/null || \
ip route get 8.8.8.8 | awk '{print $7; exit}' || \
hostname -I | awk '{print $1}')
else
BIND_ADDR="${bind_addr}"
fi
log "检测到 IP 地址: $BIND_ADDR"
# 创建 Nomad 配置文件
log "创建 Nomad 配置文件..."
cat > /etc/nomad.d/nomad.hcl << EOF
datacenter = "${datacenter}"
region = "dc1"
data_dir = "/opt/nomad/data"
bind_addr = "$BIND_ADDR"
%{ if server_enabled }
server {
enabled = true
bootstrap_expect = ${bootstrap_expect}
encrypt = "${nomad_encrypt_key}"
}
%{ endif }
%{ if client_enabled }
client {
enabled = true
host_volume "podman-sock" {
path = "/run/podman/podman.sock"
read_only = false
}
}
%{ endif }
ui {
enabled = true
}
addresses {
http = "0.0.0.0"
rpc = "$BIND_ADDR"
serf = "$BIND_ADDR"
}
ports {
http = 4646
rpc = 4647
serf = 4648
}
plugin "podman" {
config {
volumes {
enabled = true
}
}
}
telemetry {
collection_interval = "10s"
disable_hostname = false
prometheus_metrics = true
publish_allocation_metrics = true
publish_node_metrics = true
}
log_level = "INFO"
log_file = "/var/log/nomad/nomad.log"
EOF
# Create the systemd unit file
log "Creating the systemd unit file..."
cat > /etc/systemd/system/nomad.service << EOF
[Unit]
Description=Nomad
Documentation=https://www.nomadproject.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/nomad.d/nomad.hcl
[Service]
Type=notify
User=nomad
Group=nomad
ExecStart=/usr/local/bin/nomad agent -config=/etc/nomad.d/nomad.hcl
ExecReload=/bin/kill -HUP \$MAINPID
KillMode=process
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Start the Nomad service
log "Starting the Nomad service..."
systemctl daemon-reload
systemctl enable nomad
systemctl start nomad
# Wait for the service to start
log "Waiting for the Nomad service to start..."
sleep 10
# Verify the installation
log "Verifying the Nomad installation..."
if systemctl is-active --quiet nomad; then
log "✅ Nomad service is running"
log "📊 Node information:"
/usr/local/bin/nomad node status -self || true
else
log "❌ Nomad 服务启动失败"
systemctl status nomad --no-pager || true
journalctl -u nomad --no-pager -n 20 || true
fi
# Configure the firewall (if needed)
log "Configuring firewall rules..."
if command -v ufw >/dev/null 2>&1; then
ufw allow 4646/tcp # HTTP API
ufw allow 4647/tcp # RPC
ufw allow 4648/tcp # Serf
ufw allow 22/tcp # SSH
fi
# Create helpful aliases and scripts
log "Creating management scripts..."
cat > /usr/local/bin/nomad-status << 'EOF'
#!/bin/bash
echo "=== Nomad 服务状态 ==="
systemctl status nomad --no-pager
echo -e "\n=== Nomad 集群成员 ==="
nomad server members 2>/dev/null || echo "无法连接到集群"
echo -e "\n=== Nomad 节点状态 ==="
nomad node status 2>/dev/null || echo "无法获取节点状态"
echo -e "\n=== 最近日志 ==="
journalctl -u nomad --no-pager -n 5
EOF
chmod +x /usr/local/bin/nomad-status
# Add aliases to the ubuntu user's bashrc
echo 'alias ns="nomad-status"' >> /home/ubuntu/.bashrc
echo 'alias nomad-logs="journalctl -u nomad -f"' >> /home/ubuntu/.bashrc
log "🎉 Nomad 节点配置完成!"
log "📍 数据中心: ${datacenter}"
log "🌐 IP 地址: $BIND_ADDR"
log "🔗 Web UI: http://$BIND_ADDR:4646"
log "📝 使用 'nomad-status' 或 'ns' 命令查看状态"
# 输出重要信息到 motd
cat > /etc/update-motd.d/99-nomad << EOF
#!/bin/bash
echo ""
echo "🚀 Nomad 节点信息:"
echo " 数据中心: ${datacenter}"
echo " IP 地址: $BIND_ADDR"
echo " Web UI: http://$BIND_ADDR:4646"
echo " 状态检查: nomad-status"
echo ""
EOF
chmod +x /etc/update-motd.d/99-nomad
log "节点配置脚本执行完成"

View File

@@ -1,54 +0,0 @@
# Traefik static configuration file
global:
sendAnonymousUsage: false
# API and dashboard configuration
api:
dashboard: true
insecure: true # for testing only; use a secured configuration in production
# Entry point configuration
entryPoints:
http:
address: ":80"
# Redirect HTTP to HTTPS
http:
redirections:
entryPoint:
to: https
scheme: https
https:
address: ":443"
api:
address: ":8080"
# Provider configuration
providers:
# Enable the file provider for dynamic configuration
file:
directory: "/etc/traefik/dynamic"
watch: true
# Nomad provider - uses a static address since the Nomad API is relatively stable
nomad:
exposedByDefault: false
prefix: "traefik"
refreshInterval: 15s
stale: false
watch: true
endpoint:
address: "http://127.0.0.1:4646"
scheme: "http"
allowEmptyServices: true
# Logging configuration
log:
level: "INFO"
format: "json"
accessLog:
format: "json"
fields:
defaultMode: "keep"
headers:
defaultMode: "keep"