Clean repository: organized structure and GitOps setup
- Organized root directory structure
- Moved orphan files to proper locations
- Updated .gitignore to ignore temporary files
- Set up Gitea Runner for GitOps automation
- Fixed Tailscale access issues
- Added a workflow for automated Nomad deployment

ansible/ansible.cfg (new file)
[defaults]
inventory = inventory/hosts.yml
host_key_checking = False
timeout = 30
gathering = smart
fact_caching = memory

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
pipelining = True
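
With this ansible.cfg in place, ansible picks up the inventory automatically when run from the ansible/ directory; a minimal connectivity check (a sketch, assuming the hosts.yml below is present):

    cd ansible
    ansible all -m ping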

ansible/consul-client-deployment.yml (new file)
---
# Ansible playbook: deploy the Consul client to all Nomad nodes
- name: Deploy Consul Client to Nomad nodes
  hosts: nomad_clients:nomad_servers
  become: yes
  vars:
    consul_version: "1.21.5"
    consul_datacenter: "dc1"
    consul_servers:
      - "100.117.106.136:8300"  # master (Korea)
      - "100.122.197.112:8300"  # warden (Beijing)
      - "100.116.80.94:8300"    # ash3c (United States)

  tasks:
    - name: Update APT cache (ignoring GPG errors)
      apt:
        update_cache: yes
        force_apt_get: yes
      ignore_errors: yes

    - name: Install consul via APT (assumes the repository is already configured)
      apt:
        name: consul={{ consul_version }}-*
        state: present
        force_apt_get: yes
      ignore_errors: yes

    - name: Create consul user (if not exists)
      user:
        name: consul
        system: yes
        shell: /bin/false
        home: /opt/consul
        create_home: yes

    - name: Create consul directories
      file:
        path: "{{ item }}"
        state: directory
        owner: consul
        group: consul
        mode: '0755'
      loop:
        - /opt/consul
        - /opt/consul/data
        - /etc/consul.d
        - /var/log/consul

    - name: Get node Tailscale IP
      shell: ip addr show tailscale0 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1
      register: tailscale_ip
      failed_when: tailscale_ip.stdout == ""

    - name: Create consul client configuration
      template:
        src: templates/consul-client.hcl.j2
        dest: /etc/consul.d/consul.hcl
        owner: consul
        group: consul
        mode: '0644'
      notify: restart consul

    - name: Create consul systemd service
      template:
        src: templates/consul.service.j2
        dest: /etc/systemd/system/consul.service
        owner: root
        group: root
        mode: '0644'
      notify: reload systemd

    - name: Enable and start consul service
      systemd:
        name: consul
        enabled: yes
        state: started

    - name: Wait for consul to be ready
      uri:
        url: "http://{{ tailscale_ip.stdout }}:8500/v1/status/leader"
        status_code: 200
        timeout: 5
      register: consul_leader_status
      until: consul_leader_status.status == 200
      retries: 30
      delay: 5

    - name: Verify consul cluster membership
      shell: consul members -status=alive -format=json | jq -r '.[].Name'
      register: consul_members
      changed_when: false

    - name: Display cluster status
      debug:
        msg: "Node {{ inventory_hostname.split('.')[0] }} joined cluster with {{ consul_members.stdout_lines | length }} members"

  handlers:
    - name: reload systemd
      systemd:
        daemon_reload: yes

    - name: restart consul
      systemd:
        name: consul
        state: restarted
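
A sketch of a typical run (it assumes the inventory defines the nomad_clients and nomad_servers groups this play targets; the hosts.yml below only defines warden):

    cd ansible
    ansible-playbook consul-client-deployment.yml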

ansible/fix-warden-zsh.yml (new file)
---
# Ansible playbook: fix the zsh configuration on the warden node
- name: Fix zsh configuration on warden node
  hosts: warden
  become: yes
  vars:
    target_user: ben  # or whichever user you want to fix

  tasks:
    - name: Check current shell
      shell: echo $SHELL
      register: current_shell
      changed_when: false

    - name: Show current shell
      debug:
        msg: "Current shell: {{ current_shell.stdout }}"

    - name: Ensure zsh is installed
      package:
        name: zsh
        state: present

    - name: Back up existing zsh configuration files
      become_user: "{{ target_user }}"  # operate on the target user's home, not root's
      shell: |
        if [ -f ~/.zshrc ]; then
          cp ~/.zshrc ~/.zshrc.backup.$(date +%Y%m%d_%H%M%S)
          echo "Backed up ~/.zshrc"
        fi
        if [ -f ~/.zsh_history ]; then
          cp ~/.zsh_history ~/.zsh_history.backup.$(date +%Y%m%d_%H%M%S)
          echo "Backed up ~/.zsh_history"
        fi
      register: backup_result
      changed_when: backup_result.stdout != ""

    - name: Show backup results
      debug:
        msg: "{{ backup_result.stdout_lines }}"
      when: backup_result.stdout != ""

    - name: Check whether oh-my-zsh exists
      stat:
        path: "/home/{{ target_user }}/.oh-my-zsh"
      register: ohmyzsh_exists

    - name: Reinstall oh-my-zsh (if missing)
      become_user: "{{ target_user }}"
      shell: |
        if [ -d ~/.oh-my-zsh ]; then
          rm -rf ~/.oh-my-zsh
        fi
        sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" "" --unattended
      when: not ohmyzsh_exists.stat.exists

    - name: Create a basic .zshrc configuration
      copy:
        content: |
          # Path to your oh-my-zsh installation.
          export ZSH="$HOME/.oh-my-zsh"

          # Set name of the theme to load
          ZSH_THEME="robbyrussell"

          # Which plugins would you like to load?
          plugins=(git docker docker-compose kubectl)

          source $ZSH/oh-my-zsh.sh

          # User configuration
          export PATH=$PATH:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin

          # Aliases
          alias ll='ls -alF'
          alias la='ls -A'
          alias l='ls -CF'
          alias ..='cd ..'
          alias ...='cd ../..'

          # Nomad/Consul aliases
          alias nomad-status='nomad status'
          alias consul-members='consul members'

          # History settings
          HISTSIZE=10000
          SAVEHIST=10000
          setopt HIST_IGNORE_DUPS
          setopt HIST_IGNORE_SPACE
          setopt HIST_VERIFY
          setopt SHARE_HISTORY
        dest: "/home/{{ target_user }}/.zshrc"  # explicit path; with become, ~ would point at root's home
        owner: "{{ target_user }}"
        group: "{{ target_user }}"
        mode: '0644'
        backup: yes

    - name: Set zsh as the default shell
      user:
        name: "{{ target_user }}"
        shell: /usr/bin/zsh

    - name: Check .zshrc syntax
      become_user: "{{ target_user }}"
      shell: zsh -n ~/.zshrc
      register: zsh_syntax_check
      failed_when: zsh_syntax_check.rc != 0
      changed_when: false

    - name: Test zsh startup
      become_user: "{{ target_user }}"
      shell: zsh -c "echo 'zsh configuration test passed'"
      register: zsh_test
      changed_when: false

    - name: Show fix results
      debug:
        msg:
          - "zsh configuration fix complete"
          - "Syntax check: {{ 'PASS' if zsh_syntax_check.rc == 0 else 'FAIL' }}"
          - "Startup test: {{ zsh_test.stdout }}"

    - name: Clean up a corrupted history file
      become_user: "{{ target_user }}"
      shell: |
        if [ -f ~/.zsh_history ]; then
          # Try to salvage readable entries from the history file
          strings ~/.zsh_history > ~/.zsh_history.clean
          mv ~/.zsh_history.clean ~/.zsh_history
          echo "Cleaned zsh history file"
        fi
      register: history_cleanup
      changed_when: history_cleanup.stdout != ""

    - name: Fix DNS configuration issues
      shell: |
        # Back up the existing DNS configuration (the play already runs as root)
        cp /etc/resolv.conf /etc/resolv.conf.backup.$(date +%Y%m%d_%H%M%S)

        # Append fallback DNS servers
        echo "# Fallback DNS servers" >> /etc/resolv.conf
        echo "nameserver 8.8.8.8" >> /etc/resolv.conf
        echo "nameserver 8.8.4.4" >> /etc/resolv.conf
        echo "nameserver 1.1.1.1" >> /etc/resolv.conf

        echo "Added fallback DNS servers"
      register: dns_fix
      changed_when: dns_fix.stdout != ""

    - name: Test the DNS fix
      shell: nslookup github.com
      register: dns_test
      changed_when: false

    - name: Show DNS test results
      debug:
        msg: "{{ dns_test.stdout_lines }}"

    - name: Fix zsh completion permission issues
      shell: |
        # Fix ownership of the system completion directories
        chown -R root:root /usr/share/zsh/vendor-completions/ 2>/dev/null || true
        chown -R root:root /usr/share/bash-completion/ 2>/dev/null || true
        chown -R root:root /usr/share/fish/vendor_completions.d/ 2>/dev/null || true
        chown -R root:root /usr/local/share/zsh/site-functions/ 2>/dev/null || true

        # Set correct permissions
        chmod -R 755 /usr/share/zsh/vendor-completions/ 2>/dev/null || true
        chmod -R 755 /usr/share/bash-completion/ 2>/dev/null || true
        chmod -R 755 /usr/share/fish/vendor_completions.d/ 2>/dev/null || true
        chmod -R 755 /usr/local/share/zsh/site-functions/ 2>/dev/null || true

        # Fix oh-my-zsh directory permissions for the target user (if present)
        USER_HOME=/home/{{ target_user }}
        if [ -d "$USER_HOME/.oh-my-zsh" ]; then
          chmod -R 755 "$USER_HOME/.oh-my-zsh/completions" 2>/dev/null || true
          chmod -R 755 "$USER_HOME/.oh-my-zsh/plugins" 2>/dev/null || true
          chmod -R 755 "$USER_HOME/.oh-my-zsh/lib" 2>/dev/null || true
          echo "Fixed oh-my-zsh directory permissions"
        fi

        # Clear the completion cache so it is regenerated
        rm -f "$USER_HOME"/.zcompdump* 2>/dev/null || true
        echo "Fixed system completion directory permissions and cleared the cache"
      register: completion_fix
      changed_when: completion_fix.stdout != ""

    - name: Show completion fix results
      debug:
        msg: "{{ completion_fix.stdout_lines }}"
      when: completion_fix.stdout != ""

    - name: Test the zsh completion fix
      become_user: "{{ target_user }}"
      shell: zsh -c "autoload -U compinit && compinit -D && echo 'completion system fixed'"
      register: completion_test
      changed_when: false

    - name: Prompt to reload the zsh configuration
      debug:
        msg:
          - "Fix complete! Run the following to reload the configuration:"
          - "source ~/.zshrc"
          - "or log in again to pick up the new shell configuration"
          - "completion permission issues have been fixed"
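
A sketch of a typical invocation (run from the ansible/ directory so ansible.cfg applies; the -e override is optional since target_user defaults to ben):

    cd ansible
    ansible-playbook fix-warden-zsh.yml -e target_user=ben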

ansible/inventory/hosts.yml (new file)
---
all:
  children:
    warden:
      hosts:
        warden:
          ansible_host: 100.122.197.112
          ansible_user: ben
          ansible_password: "3131"
          ansible_become_password: "3131"
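
Note that consul-client-deployment.yml targets nomad_clients:nomad_servers, neither of which this inventory defines. A hypothetical extension using the three nodes named in the playbook's consul_servers comments (the group name and host names are assumptions):

    all:
      children:
        nomad_servers:
          hosts:
            master:
              ansible_host: 100.117.106.136
            warden:
              ansible_host: 100.122.197.112
            ash3c:
              ansible_host: 100.116.80.94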

ansible/templates/consul-client.hcl.j2 (new file)
# Consul Client Configuration for {{ inventory_hostname }}
datacenter = "{{ consul_datacenter }}"
data_dir = "/opt/consul/data"
log_level = "INFO"
node_name = "{{ inventory_hostname.split('.')[0] }}"
bind_addr = "{{ tailscale_ip.stdout }}"

# Client mode (not server)
server = false

# Connect to the Consul servers (the three-node cluster)
retry_join = [
  "100.117.106.136",  # master (Korea)
  "100.122.197.112",  # warden (Beijing)
  "100.116.80.94"     # ash3c (United States)
]

# Performance optimization
performance {
  raft_multiplier = 5
}

# Ports configuration
ports {
  grpc = 8502
  http = 8500
  dns = 8600
}

# Enable Connect for service mesh
connect {
  enabled = true
}

# Cache configuration for performance
cache {
  entry_fetch_max_burst = 42
  entry_fetch_rate = 30
}

# Node metadata
node_meta = {
  region = "{{ region | default('unknown') }}"
  zone = "nomad-server"
}

# UI disabled for clients
ui_config {
  enabled = false
}

# ACL configuration (if needed)
acl = {
  enabled = false
  default_policy = "allow"
}

# Logging
log_file = "/var/log/consul/consul.log"
log_rotate_duration = "24h"
log_rotate_max_files = 7
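
For warden, the host-specific fields of this template would render roughly as follows (a sketch; it assumes the Tailscale IP gathered by the playbook matches the inventory address):

    datacenter = "dc1"
    node_name  = "warden"
    bind_addr  = "100.122.197.112"
    server     = false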

ansible/templates/consul.service.j2 (new file)
[Unit]
Description=Consul Client
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/consul.d/consul.hcl

[Service]
Type=notify
User=consul
Group=consul
ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
LimitNOFILE=65536

# Security settings
NoNewPrivileges=yes
PrivateTmp=yes
ProtectHome=yes
ProtectSystem=strict
ReadWritePaths=/opt/consul /var/log/consul

[Install]
WantedBy=multi-user.target
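
Once the unit is up, cluster health can be spot-checked from any node (a sketch; the curl target mirrors the readiness probe in the playbook, using warden's Tailscale IP as an example):

    consul members
    curl http://100.122.197.112:8500/v1/status/leader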