# mgmt/scripts/utilities/final-nomad-cluster-fix.yml
---
- name: Final Complete Nomad Cluster Fix
  hosts: nomad_cluster
  become: yes
  gather_facts: yes

  vars:
    nomad_encrypt_key: "NVOMDvXblgWfhtzFzOUIHnKEOrbXOkPrkIPbRGGf1YQ="
    nomad_servers:
      - "100.116.158.95:4647"   # semaphore
      - "100.117.106.136:4647"  # master
      - "100.116.80.94:4647"    # ash3c

  tasks:
    - name: Stop nomad service
      systemd:
        name: nomad
        state: stopped
      ignore_errors: yes

    - name: Reset failed nomad service
      command: systemctl reset-failed nomad
      ignore_errors: yes
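
    # Recreate the nomad system user and directory layout, then wipe and recreate
    # /opt/nomad/data so every node starts from a clean state store.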

    - name: Create nomad user if not exists
      user:
        name: nomad
        system: yes
        shell: /bin/false
        home: /opt/nomad
        create_home: no

    - name: Create nomad directories with correct permissions
      file:
        path: "{{ item }}"
        state: directory
        owner: nomad
        group: nomad
        mode: '0755'
      loop:
        - /etc/nomad.d
        - /opt/nomad
        - /opt/nomad/data
        - /opt/nomad/alloc_mounts
        - /var/log/nomad

    - name: Clean old nomad data
      file:
        path: /opt/nomad/data
        state: absent

    - name: Recreate nomad data directory
      file:
        path: /opt/nomad/data
        state: directory
        owner: nomad
        group: nomad
        mode: '0755'
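
    # Prefer the Tailscale interface (tailscale0) for cluster traffic; fall back to the
    # default IPv4 address on hosts where Tailscale is not available.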

    - name: Get Tailscale IP address
      shell: ip addr show tailscale0 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1
      register: tailscale_ip
      failed_when: false

    - name: Set bind address (fallback to default interface if tailscale not available)
      set_fact:
        bind_address: "{{ tailscale_ip.stdout if tailscale_ip.stdout != '' else ansible_default_ipv4.address }}"

    - name: Create nomad server template
      copy:
        content: |
          {% raw %}
          datacenter = "{{ nomad_datacenter }}"
          region     = "{{ nomad_region }}"
          data_dir   = "{{ nomad_data_dir }}"
          bind_addr  = "{{ nomad_bind_addr }}"

          server {
            enabled          = true
            bootstrap_expect = {{ nomad_bootstrap_expect }}
            encrypt          = "{{ nomad_encrypt }}"

            server_join {
              retry_join     = {{ nomad_retry_join | to_json }}
              retry_interval = "15s"
              retry_max      = 3
            }
          }

          client {
            enabled   = true
            alloc_dir = "{{ nomad_alloc_dir }}"
          }

          ui {
            enabled = true
          }

          addresses {
            http = "0.0.0.0"
            rpc  = "{{ nomad_bind_addr }}"
            serf = "{{ nomad_bind_addr }}"
          }

          ports {
            http = 4646
            rpc  = 4647
            serf = 4648
          }

          plugin "docker" {
            config {
              allow_privileged = true
              volumes {
                enabled = true
              }
            }
          }

          log_level = "INFO"
          log_file  = "{{ nomad_log_file }}"
          {% endraw %}
        dest: /tmp/nomad-server.hcl.j2
      delegate_to: localhost
      run_once: true
      become: no

    - name: Generate nomad configuration
      template:
        src: /tmp/nomad-server.hcl.j2
        dest: /etc/nomad.d/nomad.hcl
        owner: nomad
        group: nomad
        mode: '0640'
      vars:
        nomad_datacenter: "dc1"
        nomad_region: "global"
        nomad_data_dir: "/opt/nomad/data"
        nomad_bind_addr: "{{ bind_address }}"
        nomad_bootstrap_expect: 3
        nomad_encrypt: "{{ nomad_encrypt_key }}"
        nomad_retry_join: "{{ nomad_servers }}"
        nomad_alloc_dir: "/opt/nomad/alloc_mounts"
        nomad_log_file: "/var/log/nomad/nomad.log"
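
    # Systemd unit for the agent; assumes the nomad binary is installed at /usr/bin/nomad
    # (the ExecStart path below).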

    - name: Create nomad systemd service
      copy:
        content: |
          [Unit]
          Description=Nomad
          Documentation=https://www.nomadproject.io/
          Requires=network-online.target
          After=network-online.target
          ConditionFileNotEmpty=/etc/nomad.d/nomad.hcl

          [Service]
          Type=notify
          User=nomad
          Group=nomad
          ExecStart=/usr/bin/nomad agent -config=/etc/nomad.d/nomad.hcl
          ExecReload=/bin/kill -HUP $MAINPID
          KillMode=process
          Restart=on-failure
          LimitNOFILE=65536

          [Install]
          WantedBy=multi-user.target
        dest: /etc/systemd/system/nomad.service
        mode: '0644'

    - name: Reload systemd daemon
      systemd:
        daemon_reload: yes

    - name: Start nomad service
      systemd:
        name: nomad
        state: started
        enabled: yes
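
    # Port 4646 is the Nomad HTTP API/UI port configured above; the check is best-effort
    # (ignore_errors) so a single slow node does not fail the play.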

    - name: Wait for nomad to start
      wait_for:
        port: 4646
        host: "{{ bind_address }}"
        delay: 5
        timeout: 30
      ignore_errors: yes
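
# Example run, assuming an inventory that defines the nomad_cluster group (the inventory
# path shown is illustrative):
#   ansible-playbook -i inventory/hosts.yml mgmt/scripts/utilities/final-nomad-cluster-fix.yml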