feat: add MCP server test suite and Kali Linux test support

refactor(consul): move Consul cluster job files into the components directory
refactor(vault): move Vault cluster job files into the components directory
refactor(nomad): move the Nomad NFS volume job file into the components directory

fix(ssh): fix SSH key authentication configuration for the browser host
fix(ansible): update the Ansible configuration to support SSH key authentication

test: add comprehensive MCP server test scripts and reports
test: add Kali Linux test suite and health checks
test: add an automated test-runner script

docs: update the README with testing instructions and lessons learned
docs: add a Vault deployment guide and testing documentation

chore: update the Makefile with test-related commands (see the sketch below)
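
A minimal sketch of how the new test commands might be driven; the target and script names here are assumptions for illustration, not names taken from this commit:

    # Hypothetical invocations -- names are assumptions, not from the diff.
    make test                # run the full test suite
    make test-mcp            # MCP server tests only (assumed target name)
    ./scripts/run-tests.sh   # assumed path of the automated test-runner

    # Verify SSH key authentication works for the Ansible-managed hosts:
    ansible all -i inventory.ini -m ping --private-key ~/.ssh/id_ed25519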
2025-09-29 14:00:22 +00:00
parent f72b17a34f
commit c0064b2cad
72 changed files with 6326 additions and 109 deletions


@@ -0,0 +1,157 @@
job "consul-cluster-simple" {
datacenters = ["dc1"]
type = "service"
group "consul-master" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "kr-master"
}
network {
port "http" {
static = 8500
}
port "rpc" {
static = 8300
}
port "serf_lan" {
static = 8301
}
port "serf_wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=100.117.106.136",
"-bind=100.117.106.136",
"-advertise=100.117.106.136",
"-retry-join=100.116.80.94",
"-retry-join=100.122.197.112",
"-ui"
]
}
resources {
cpu = 300
memory = 512
}
}
}
group "consul-ash3c" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "us-ash3c"
}
network {
port "http" {
static = 8500
}
port "rpc" {
static = 8300
}
port "serf_lan" {
static = 8301
}
port "serf_wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=100.116.80.94",
"-bind=100.116.80.94",
"-advertise=100.116.80.94",
"-retry-join=100.117.106.136",
"-retry-join=100.122.197.112",
"-ui"
]
}
resources {
cpu = 300
memory = 512
}
}
}
group "consul-warden" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "bj-warden"
}
network {
port "http" {
static = 8500
}
port "rpc" {
static = 8300
}
port "serf_lan" {
static = 8301
}
port "serf_wan" {
static = 8302
}
}
task "consul" {
driver = "exec"
config {
command = "consul"
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/opt/nomad/data/consul",
"-client=100.122.197.112",
"-bind=100.122.197.112",
"-advertise=100.122.197.112",
"-retry-join=100.117.106.136",
"-retry-join=100.116.80.94",
"-ui"
]
}
resources {
cpu = 300
memory = 512
}
}
}
}
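
Once the job is placed, a quick health check from an operator machine might look like this (a sketch; it assumes the Tailscale IPs above are reachable and the job file is saved as consul-cluster-simple.nomad):

    nomad job run consul-cluster-simple.nomad
    nomad job status consul-cluster-simple
    consul members -http-addr=http://100.117.106.136:8500   # expect three alive server members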


@@ -0,0 +1,57 @@
job "consul-cluster" {
datacenters = ["dc1"]
type = "service"
group "consul-servers" {
count = 3
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(master|ash3c|hcp)"
}
task "consul" {
driver = "podman"
config {
image = "hashicorp/consul:latest"
ports = ["server", "serf_lan", "serf_wan", "ui"]
args = [
"agent",
"-server",
"-bootstrap-expect=3",
"-data-dir=/consul/data",
"-ui",
"-client=0.0.0.0",
"-bind={{ env `NOMAD_IP_server` }}",
"-retry-join=100.117.106.136",
"-retry-join=100.116.80.94",
"-retry-join=100.76.13.187"
]
}
volume_mount {
volume = "consul-data"
destination = "/consul/data"
read_only = false
}
resources {
network {
mbits = 10
port "server" { static = 8300 }
port "serf_lan" { static = 8301 }
port "serf_wan" { static = 8302 }
port "ui" { static = 8500 }
}
}
}
volume "consul-data" {
type = "host"
read_only = false
source = "consul-data"
}
}
}
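
Jobs that mount a host volume only schedule onto clients that declare it, so consul-data must be registered in each client's Nomad configuration first; a minimal sketch (the host path is an assumption):

    client {
      host_volume "consul-data" {
        path      = "/opt/consul/data"   # assumed host path; must exist on the client
        read_only = false
      }
    }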


@@ -0,0 +1,110 @@
job "install-podman-driver" {
datacenters = ["dc1"]
type = "system" # 在所有节点上运行
group "install" {
task "install-podman" {
driver = "exec"
config {
command = "bash"
args = [
"-c",
<<-EOF
set -euo pipefail
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
# 依赖工具
if ! command -v jq >/dev/null 2>&1 || ! command -v unzip >/dev/null 2>&1 || ! command -v wget >/dev/null 2>&1; then
echo "Installing dependencies (jq unzip wget)..."
sudo -n apt update -y || true
sudo -n apt install -y jq unzip wget || true
fi
# 安装 Podman若未安装
if ! command -v podman >/dev/null 2>&1; then
echo "Installing Podman..."
sudo -n apt update -y || true
sudo -n apt install -y podman || true
sudo -n systemctl enable podman || true
else
echo "Podman already installed"
fi
# 启用并启动 podman.socket确保 Nomad 可访问
sudo -n systemctl enable --now podman.socket || true
if getent group podman >/dev/null 2>&1; then
sudo -n usermod -aG podman nomad || true
fi
# 安装 Nomad Podman 驱动插件(始终确保存在)
PODMAN_DRIVER_VERSION="0.6.1"
PLUGIN_DIR="/opt/nomad/data/plugins"
sudo -n mkdir -p "${PLUGIN_DIR}" || true
cd /tmp
if [ ! -x "${PLUGIN_DIR}/nomad-driver-podman" ]; then
echo "Installing nomad-driver-podman ${PODMAN_DRIVER_VERSION}..."
wget -q "https://releases.hashicorp.com/nomad-driver-podman/${PODMAN_DRIVER_VERSION}/nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
unzip -o "nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
sudo -n mv -f nomad-driver-podman "${PLUGIN_DIR}/"
sudo -n chmod +x "${PLUGIN_DIR}/nomad-driver-podman"
sudo -n chown -R nomad:nomad "${PLUGIN_DIR}"
rm -f "nomad-driver-podman_${PODMAN_DRIVER_VERSION}_linux_amd64.zip"
else
echo "nomad-driver-podman already present in ${PLUGIN_DIR}"
fi
# 更新 /etc/nomad.d/nomad.hcl 的 plugin_dir 设置
if [ -f /etc/nomad.d/nomad.hcl ]; then
if grep -q "^plugin_dir\s*=\s*\"" /etc/nomad.d/nomad.hcl; then
sudo -n sed -i 's#^plugin_dir\s*=\s*\".*\"#plugin_dir = "/opt/nomad/data/plugins"#' /etc/nomad.d/nomad.hcl || true
else
echo 'plugin_dir = "/opt/nomad/data/plugins"' | sudo -n tee -a /etc/nomad.d/nomad.hcl >/dev/null || true
fi
fi
# 重启 Nomad 服务以加载插件
sudo -n systemctl restart nomad || true
echo "Waiting for Nomad to restart..."
sleep 15
# 检查 Podman 驱动是否被 Nomad 检测到
if /usr/local/bin/nomad node status -self -json 2>/dev/null | jq -r '.Drivers.podman.Detected' | grep -q "true"; then
echo "Podman driver successfully loaded"
exit 0
fi
echo "Podman driver not detected yet, retrying once after socket restart..."
sudo -n systemctl restart podman.socket || true
sleep 5
if /usr/local/bin/nomad node status -self -json 2>/dev/null | jq -r '.Drivers.podman.Detected' | grep -q "true"; then
echo "Podman driver successfully loaded after socket restart"
exit 0
else
echo "Podman driver still not detected; manual investigation may be required"
exit 1
fi
EOF
]
}
resources {
cpu = 200
memory = 256
}
// 以root权限运行
// user = "root"
# 使用 nomad 用户运行任务,避免客户端策略禁止 root
user = "nomad"
# 确保任务成功完成
restart {
attempts = 1
interval = "24h"
delay = "60s"
mode = "fail"
}
}
}
}
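
If the driver binary is in place but Nomad still does not detect it, an explicit plugin stanza in /etc/nomad.d/nomad.hcl is the usual next step; a sketch assuming the default rootful socket path:

    plugin "nomad-driver-podman" {
      config {
        # Socket created by podman.socket above; this path is the common
        # distro default and an assumption here.
        socket_path = "unix:///run/podman/podman.sock"
      }
    }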


@@ -0,0 +1,34 @@
job "nfs-volume-example" {
datacenters = ["dc1"]
type = "service"
group "nfs-app" {
count = 1
volume "nfs-shared" {
type = "host"
source = "nfs-shared"
read_only = false
}
task "app" {
driver = "podman"
config {
image = "alpine:latest"
args = ["tail", "-f", "/dev/null"]
}
volume_mount {
volume = "nfs-shared"
destination = "/shared"
read_only = false
}
resources {
cpu = 100
memory = 64
}
}
}
}
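
This example assumes each eligible client already has an NFS export mounted and registered as the nfs-shared host volume; a sketch of that client-side setup (server address and mount point are assumptions):

    # /etc/fstab entry on the client (assumed NFS server and export):
    # 100.117.106.136:/export/shared  /mnt/nfs/shared  nfs  defaults  0 0

    client {
      host_volume "nfs-shared" {
        path      = "/mnt/nfs/shared"   # assumed mount point
        read_only = false
      }
    }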


@@ -0,0 +1,211 @@
job "vault-cluster-exec" {
datacenters = ["dc1"]
type = "service"
group "vault-master" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "kr-master"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "127.0.0.1:8500"
path = "vault/"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # 生产环境应启用TLS
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 500
memory = 1024
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
group "vault-ash3c" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "us-ash3c"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "127.0.0.1:8500"
path = "vault/"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # 生产环境应启用TLS
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 500
memory = 1024
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
group "vault-warden" {
count = 1
constraint {
attribute = "${node.unique.name}"
value = "bj-warden"
}
network {
port "api" {
static = 8200
}
port "cluster" {
static = 8201
}
}
task "vault" {
driver = "exec"
config {
command = "vault"
args = [
"server",
"-config=/opt/nomad/data/vault/config/vault.hcl"
]
}
template {
data = <<EOH
storage "consul" {
address = "127.0.0.1:8500"
path = "vault/"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # 生产环境应启用TLS
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
EOH
destination = "/opt/nomad/data/vault/config/vault.hcl"
}
resources {
cpu = 500
memory = 1024
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
}
}
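
After the allocations are healthy, Vault still has to be initialized and unsealed once; a sketch against the kr-master node (the IP is taken from the job above, the share counts are illustrative defaults):

    export VAULT_ADDR=http://100.117.106.136:8200
    vault operator init -key-shares=5 -key-threshold=3   # prints unseal keys and root token
    vault operator unseal   # repeat with 3 of the 5 keys, then on the other nodes
    vault status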


@@ -0,0 +1,94 @@
job "vault-cluster" {
datacenters = ["dc1"]
type = "service"
group "vault-servers" {
count = 3
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "(warden|ash3c|master)"
}
task "vault" {
driver = "podman"
config {
image = "hashicorp/vault:latest"
ports = ["api", "cluster"]
# 确保容器在退出时不会自动重启
command = "vault"
args = [
"server",
"-config=/vault/config/vault.hcl"
]
# 容器网络设置
network_mode = "host"
# 安全设置
cap_add = ["IPC_LOCK"]
}
template {
data = <<EOH
storage "consul" {
address = "127.0.0.1:8500"
path = "vault/"
token = "{{ with secret "consul/creds/vault" }}{{ .Data.token }}{{ end }}"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1 # 生产环境应启用TLS
}
api_addr = "http://{{ env "NOMAD_IP_api" }}:8200"
cluster_addr = "http://{{ env "NOMAD_IP_cluster" }}:8201"
ui = true
disable_mlock = true
EOH
destination = "vault/config/vault.hcl"
}
volume_mount {
volume = "vault-data"
destination = "/vault/data"
read_only = false
}
resources {
cpu = 500
memory = 1024
network {
mbits = 10
port "api" { static = 8200 }
port "cluster" { static = 8201 }
}
}
service {
name = "vault"
port = "api"
check {
name = "vault-health"
type = "http"
path = "/v1/sys/health"
interval = "10s"
timeout = "2s"
}
}
}
volume "vault-data" {
type = "host"
read_only = false
source = "vault-data"
}
}
}
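
Two prerequisites for this job are not in the file itself: the template's Consul token lookup needs Nomad's Vault integration, and the vault-data host volume must be registered on each client. A minimal sketch of both (the policy name and host path are assumptions):

    # In the task: grant the template access to the Consul secrets engine.
    vault {
      policies = ["consul-creds"]   # assumed Vault policy name
    }

    # In each client's Nomad configuration:
    client {
      host_volume "vault-data" {
        path      = "/opt/vault/data"   # assumed host path
        read_only = false
      }
    }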