# mgmt/scripts/utilities/final-nomad-fix.yml
---
# Re-bootstraps a 3-node Nomad cluster over Tailscale: stops the service,
# wipes stale raft/serf state, writes a fresh combined server+client config,
# then restarts and reports status.
- name: Final Nomad Cluster Fix
  hosts: nomad_cluster
  become: true
  vars:
    # NOTE(review): gossip encryption key is committed in plaintext — move it
    # to Ansible Vault or an external secret store, and rotate this value.
    nomad_encrypt_key: "NVOMDvXblgWfhtzFzOUIHnKEOrbXOkPrkIPbRGGf1YQ="
    # Tailscale addresses per inventory hostname; used both for bind/advertise
    # and for server_join.retry_join below.
    tailscale_ips:
      semaphore: "100.116.158.95"
      master: "100.117.106.136"
      ash3c: "100.116.80.94"

  tasks:
    # Service may not exist / already be stopped on a fresh host, hence
    # ignore_errors.
    - name: Stop nomad service
      systemd:
        name: nomad
        state: stopped
      ignore_errors: true

    - name: Create required directories
      file:
        path: "{{ item }}"
        state: directory
        owner: nomad
        group: nomad
        mode: "0755"
      loop:
        - /opt/nomad/data
        - /opt/nomad/alloc_mounts
        - /var/log/nomad

    # Destructive: clears raft/serf state so the cluster re-bootstraps from
    # scratch. Glob requires shell (the file module has no wildcard support).
    - name: Clean nomad data
      shell: rm -rf /opt/nomad/data/*
      ignore_errors: true

    - name: Create working nomad configuration
      copy:
        content: |
          datacenter = "dc1"
          region = "global"
          data_dir = "/opt/nomad/data"
          bind_addr = "{{ tailscale_ips[inventory_hostname] }}"

          server {
            enabled = true
            bootstrap_expect = 3
            encrypt = "{{ nomad_encrypt_key }}"
            server_join {
              retry_join = [
                "{{ tailscale_ips.semaphore }}",
                "{{ tailscale_ips.master }}",
                "{{ tailscale_ips.ash3c }}"
              ]
            }
          }

          client {
            enabled = true
          }

          ui {
            enabled = true
          }

          addresses {
            http = "0.0.0.0"
            rpc = "{{ tailscale_ips[inventory_hostname] }}"
            serf = "{{ tailscale_ips[inventory_hostname] }}"
          }

          ports {
            http = 4646
            rpc = 4647
            serf = 4648
          }

          plugin "docker" {
            config {
              allow_privileged = true
              volumes {
                enabled = true
              }
            }
          }

          log_level = "INFO"
          log_file = "/var/log/nomad/nomad.log"
        dest: /etc/nomad.d/nomad.hcl
        owner: nomad
        group: nomad
        mode: "0640"

    - name: Start nomad service
      systemd:
        name: nomad
        state: started
        enabled: true

    # Give the agent time to join the gossip pool before we inspect it.
    - name: Wait for service to start
      pause:
        seconds: 10

    # Diagnostic only; exit code is non-zero if the unit failed, so tolerate it
    # and surface the output in the next task instead of aborting the play.
    - name: Check service status
      shell: systemctl status nomad --no-pager -l
      register: service_status
      ignore_errors: true

    - name: Show service status
      debug:
        var: service_status.stdout_lines