This commit is contained in:
2025-09-23 13:37:17 +00:00
parent 2fe53b6504
commit 3f45ad8361
74 changed files with 5012 additions and 3336 deletions

View File

@@ -0,0 +1,14 @@
{
"proxies": {
"http-proxy": "http://istoreos.tailnet-68f9.ts.net:7891",
"https-proxy": "http://istoreos.tailnet-68f9.ts.net:7891",
"no-proxy": "localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net"
},
"registry-mirrors": [],
"insecure-registries": [],
"debug": false,
"experimental": false,
"features": {
"buildkit": true
}
}

View File

@@ -0,0 +1,10 @@
[consul_cluster]
master ansible_host=master ansible_port=60022 ansible_user=ben ansible_become=yes ansible_become_pass=3131
ash3c ansible_host=ash3c ansible_user=ben ansible_become=yes ansible_become_pass=3131
[consul_cluster:vars]
ansible_ssh_common_args='-o StrictHostKeyChecking=no'
consul_version=1.21.4
consul_datacenter=dc1
# Generate the encryption key with: consul keygen
# NOTE(review): gossip key (and become passwords above) committed in plaintext — move to Ansible Vault.
vault_consul_encrypt_key=1EvGItLOB8nuHnSA0o+rO0zXzLeJl+U+Jfvuw0+H848=

View File

@@ -0,0 +1,20 @@
[nomad_servers]
master ansible_host=100.117.106.136 ansible_port=60022 ansible_user=ben ansible_become=yes ansible_become_pass=3131 nomad_role=server nomad_bootstrap_expect=3
semaphore ansible_connection=local nomad_role=server nomad_bootstrap_expect=3
ash3c ansible_host=100.116.80.94 ansible_port=22 ansible_user=ben ansible_become=yes ansible_become_pass=3131 nomad_role=server nomad_bootstrap_expect=3
[nomad_clients]
# Add client nodes here if needed
[nomad_cluster:children]
nomad_servers
nomad_clients
[nomad_cluster:vars]
ansible_ssh_private_key_file=~/.ssh/id_ed25519
ansible_user=ben
ansible_become=yes
nomad_version=1.10.5
nomad_datacenter=dc1
nomad_region=global
# NOTE(review): gossip encryption key committed in plaintext — move to Ansible Vault.
nomad_encrypt_key=NVOMDvXblgWfhtzFzOUIHnKEOrbXOkPrkIPbRGGf1YQ=

View File

@@ -0,0 +1,15 @@
[nomad_servers]
localhost ansible_connection=local nomad_role=server nomad_bootstrap_expect=1
[nomad_clients]
# Add client nodes here if needed
[nomad_cluster:children]
nomad_servers
nomad_clients
[nomad_cluster:vars]
ansible_user=root
nomad_version=1.6.2
nomad_datacenter=dc1
nomad_region=global

View File

@@ -1,72 +0,0 @@
---
# Updates OS packages on cloud-provider hosts (apt for Debian-family,
# dnf for RedHat-family) and reports whether a reboot is required.
- name: Cloud Providers System Update Playbook
  # NOTE(review): "ditigalocean" looks like a typo for "digitalocean" —
  # left unchanged because it must match the inventory group name; verify.
  hosts: huawei,google,ditigalocean,aws
  become: true
  gather_facts: true
  tasks:
    # Ubuntu/Debian system update (apt)
    - name: Update apt cache (Ubuntu/Debian)
      apt:
        update_cache: true
        cache_valid_time: 3600  # skip refresh if cache is under an hour old
      when: ansible_os_family == "Debian"

    - name: Upgrade all packages (Ubuntu/Debian)
      apt:
        upgrade: true
        autoremove: true
        autoclean: true
      when: ansible_os_family == "Debian"
      register: apt_upgrade_result

    # AWS Linux / RHEL system update (dnf)
    - name: Update dnf cache (AWS Linux/RHEL)
      dnf:
        update_cache: true
      when: ansible_os_family == "RedHat"

    - name: Upgrade all packages (AWS Linux/RHEL)
      dnf:
        name: "*"
        state: latest
        skip_broken: true
      when: ansible_os_family == "RedHat"
      register: dnf_upgrade_result

    # Show upgrade results
    - name: Display apt upgrade results
      debug:
        msg: "APT system upgrade completed. {{ apt_upgrade_result.changed }} packages were updated."
      when: ansible_os_family == "Debian" and apt_upgrade_result is defined

    - name: Display dnf upgrade results
      debug:
        msg: "DNF system upgrade completed. {{ dnf_upgrade_result.changed }} packages were updated."
      when: ansible_os_family == "RedHat" and dnf_upgrade_result is defined

    # Check whether a reboot is required (Ubuntu/Debian)
    - name: Check if reboot is required (Ubuntu/Debian)
      stat:
        path: /var/run/reboot-required
      register: debian_reboot_required
      when: ansible_os_family == "Debian"

    # Check whether a reboot is required (AWS Linux/RHEL)
    - name: Check if reboot is required (AWS Linux/RHEL)
      command: needs-restarting -r
      register: rhel_reboot_required
      failed_when: false
      changed_when: false
      when: ansible_os_family == "RedHat"

    # Reboot notifications
    - name: Notify if reboot is required (Ubuntu/Debian)
      debug:
        msg: "System reboot is required to complete the update."
      when: ansible_os_family == "Debian" and debian_reboot_required.stat.exists is defined and debian_reboot_required.stat.exists

    - name: Notify if reboot is required (AWS Linux/RHEL)
      debug:
        msg: "System reboot is required to complete the update."
      # needs-restarting -r exits 1 when a reboot is needed
      when: ansible_os_family == "RedHat" and rhel_reboot_required.rc == 1

View File

@@ -1,128 +0,0 @@
---
# Per-host Docker usage report: disk usage, containers, images, volumes,
# networks, live stats, and cleanup suggestions. Skips hosts without Docker.
- name: Docker Container Management
  hosts: all
  become: true
  gather_facts: true
  tasks:
    # Check whether Docker is installed
    - name: Check if Docker is installed
      command: which docker
      register: docker_installed
      failed_when: false
      changed_when: false

    - name: Skip Docker tasks if not installed
      debug:
        msg: "Docker not installed on {{ inventory_hostname }}, skipping Docker tasks"
      when: docker_installed.rc != 0

    # Docker system information
    - name: Get Docker system info
      shell: docker system df
      register: docker_system_info
      when: docker_installed.rc == 0

    - name: Display Docker system usage
      debug:
        msg: "🐳 Docker System Usage: {{ docker_system_info.stdout_lines }}"
      when: docker_installed.rc == 0

    # Running containers. Go-template braces are escaped ({{ '{{' }} / {{ '}}' }})
    # so Jinja2 passes them through to the docker CLI verbatim.
    - name: List running containers
      shell: docker ps --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}\t{{ '{{' }}.Ports{{ '}}' }}"
      register: running_containers
      when: docker_installed.rc == 0

    - name: Display running containers
      debug:
        msg: "📦 Running Containers: {{ running_containers.stdout_lines }}"
      when: docker_installed.rc == 0

    # Stopped containers
    - name: List stopped containers
      shell: docker ps -a --filter "status=exited" --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}"
      register: stopped_containers
      when: docker_installed.rc == 0

    - name: Display stopped containers
      debug:
        msg: "⏹️ Stopped Containers: {{ stopped_containers.stdout_lines }}"
      # > 1 because the "table" header line is always present
      when: docker_installed.rc == 0 and stopped_containers.stdout_lines | length > 1

    # Docker images
    - name: List Docker images
      shell: docker images --format "table {{ '{{' }}.Repository{{ '}}' }}\t{{ '{{' }}.Tag{{ '}}' }}\t{{ '{{' }}.Size{{ '}}' }}"
      register: docker_images
      when: docker_installed.rc == 0

    - name: Display Docker images
      debug:
        msg: "🖼️ Docker Images: {{ docker_images.stdout_lines }}"
      when: docker_installed.rc == 0

    # Dangling images
    - name: Check for dangling images
      shell: docker images -f "dangling=true" -q
      register: dangling_images
      when: docker_installed.rc == 0

    - name: Report dangling images
      debug:
        msg: "🗑️ Found {{ dangling_images.stdout_lines | length }} dangling images"
      when: docker_installed.rc == 0

    # Docker volumes
    - name: List Docker volumes
      shell: docker volume ls
      register: docker_volumes
      when: docker_installed.rc == 0

    - name: Display Docker volumes
      debug:
        msg: "💾 Docker Volumes: {{ docker_volumes.stdout_lines }}"
      when: docker_installed.rc == 0

    # Docker networks
    - name: List Docker networks
      shell: docker network ls
      register: docker_networks
      when: docker_installed.rc == 0

    - name: Display Docker networks
      debug:
        msg: "🌐 Docker Networks: {{ docker_networks.stdout_lines }}"
      when: docker_installed.rc == 0

    # Per-container resource usage
    - name: Check container resource usage
      shell: docker stats --no-stream --format "table {{ '{{' }}.Name{{ '}}' }}\t{{ '{{' }}.CPUPerc{{ '}}' }}\t{{ '{{' }}.MemUsage{{ '}}' }}\t{{ '{{' }}.NetIO{{ '}}' }}"
      register: container_stats
      when: docker_installed.rc == 0

    - name: Display container resource usage
      debug:
        msg: "📊 Container Stats: {{ container_stats.stdout_lines }}"
      when: docker_installed.rc == 0

    # Docker service status
    - name: Check Docker service status
      systemd:
        name: docker
      register: docker_service_status
      when: docker_installed.rc == 0

    - name: Display Docker service status
      debug:
        msg: "🔧 Docker Service: {{ docker_service_status.status.ActiveState }}"
      when: docker_installed.rc == 0

    # Cleanup suggestions
    - name: Suggest cleanup if needed
      debug:
        msg: |
          💡 Cleanup suggestions:
          - Run 'docker system prune -f' to remove unused data
          - Run 'docker image prune -f' to remove dangling images
          - Run 'docker volume prune -f' to remove unused volumes
      when: docker_installed.rc == 0 and (dangling_images.stdout_lines | length > 0 or stopped_containers.stdout_lines | length > 1)

View File

@@ -1,97 +0,0 @@
---
# Docker / Swarm status report for hosts in the hcp group.
- name: Docker Status Check for HCP Nodes
  hosts: hcp
  gather_facts: true
  become: true
  tasks:
    - name: Check if Docker is installed
      command: docker --version
      register: docker_version
      ignore_errors: true

    - name: Display Docker version
      debug:
        msg: "Docker version: {{ docker_version.stdout }}"
      when: docker_version.rc == 0

    - name: Check Docker service status
      systemd:
        name: docker
      register: docker_service_status

    - name: Display Docker service status
      debug:
        msg: "Docker service is {{ docker_service_status.status.ActiveState }}"

    # Go-template braces below are escaped so Jinja2 leaves them for the docker CLI.
    - name: Check Docker daemon info
      command: docker info --format "{{ '{{' }}.ServerVersion{{ '}}' }}"
      register: docker_info
      ignore_errors: true

    - name: Display Docker daemon info
      debug:
        msg: "Docker daemon version: {{ docker_info.stdout }}"
      when: docker_info.rc == 0

    - name: Check Docker Swarm status
      command: docker info --format "{{ '{{' }}.Swarm.LocalNodeState{{ '}}' }}"
      register: swarm_status
      ignore_errors: true

    - name: Display Swarm status
      debug:
        msg: "Swarm status: {{ swarm_status.stdout }}"
      when: swarm_status.rc == 0

    - name: Get Docker Swarm node info (if in swarm)
      command: docker node ls
      register: swarm_nodes
      ignore_errors: true
      when: swarm_status.stdout == "active"

    - name: Display Swarm nodes
      debug:
        msg: "{{ swarm_nodes.stdout_lines }}"
      # rc is absent when the task above was skipped, so guard on rc itself
      # (a skipped register is still "defined", which made the old check error).
      when: swarm_nodes.rc is defined and swarm_nodes.rc == 0

    - name: List running containers
      command: docker ps --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}\t{{ '{{' }}.Ports{{ '}}' }}"
      register: running_containers
      ignore_errors: true

    - name: Display running containers
      debug:
        msg: "{{ running_containers.stdout_lines }}"
      when: running_containers.rc == 0

    - name: Check Docker network list
      command: docker network ls
      register: docker_networks
      ignore_errors: true

    - name: Display Docker networks
      debug:
        msg: "{{ docker_networks.stdout_lines }}"
      when: docker_networks.rc == 0

    - name: Get Docker system info
      command: docker system df
      register: docker_system_info
      ignore_errors: true

    - name: Display Docker system usage
      debug:
        msg: "{{ docker_system_info.stdout_lines }}"
      when: docker_system_info.rc == 0

    - name: Check if node is Swarm manager
      command: docker node inspect self --format "{{ '{{' }}.ManagerStatus.Leader{{ '}}' }}"
      register: is_manager
      ignore_errors: true
      when: swarm_status.stdout == "active"

    - name: Display manager status
      debug:
        msg: "Is Swarm manager: {{ is_manager.stdout }}"
      # same skipped-register guard as the Swarm-nodes display above
      when: is_manager.rc is defined and is_manager.rc == 0

View File

@@ -1,210 +0,0 @@
---
# Inspects all containers on ash3c and produces a Docker Swarm
# migration-suitability report, saved under /tmp.
- name: Simple Docker Swarm Analysis for ash3c
  hosts: ash3c
  become: true
  gather_facts: true
  tasks:
    # Basic checks
    - name: Check if Docker is installed
      command: which docker
      register: docker_installed
      failed_when: false
      changed_when: false

    - name: Fail if Docker not installed
      fail:
        msg: "Docker is not installed on {{ inventory_hostname }}"
      when: docker_installed.rc != 0

    # Current Swarm status
    - name: Check Docker Swarm status
      shell: docker info | grep "Swarm:" -A 1
      register: swarm_status

    - name: Display current Swarm status
      debug:
        msg: "🔍 Current Swarm Status: {{ swarm_status.stdout_lines }}"

    # Running containers (Go-template braces escaped for Jinja2)
    - name: Get running containers
      shell: docker ps --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Image{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}\t{{ '{{' }}.Ports{{ '}}' }}"
      register: running_containers

    - name: Display running containers
      debug:
        msg: "🏃 Running Containers: {{ running_containers.stdout_lines }}"

    # All containers (including stopped)
    - name: Get all containers
      shell: docker ps -a --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Image{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}"
      register: all_containers

    - name: Display all containers
      debug:
        msg: "📦 All Containers: {{ all_containers.stdout_lines }}"

    # Per-container details
    - name: Get container names only
      shell: docker ps -a --format "{{ '{{' }}.Names{{ '}}' }}"
      register: container_names

    - name: Inspect each container
      shell: |
        echo "=== Container: {{ item }} ==="
        echo "Image: $(docker inspect {{ item }} --format '{{ '{{' }}.Config.Image{{ '}}' }}')"
        echo "Status: $(docker inspect {{ item }} --format '{{ '{{' }}.State.Status{{ '}}' }}')"
        echo "Restart Policy: $(docker inspect {{ item }} --format '{{ '{{' }}.HostConfig.RestartPolicy.Name{{ '}}' }}')"
        echo "Network Mode: $(docker inspect {{ item }} --format '{{ '{{' }}.HostConfig.NetworkMode{{ '}}' }}')"
        echo "Published Ports: $(docker port {{ item }} 2>/dev/null || echo 'None')"
        echo "Volumes/Mounts:"
        docker inspect {{ item }} --format '{{ '{{' }}range .Mounts{{ '}}' }} {{ '{{' }}.Source{{ '}}' }}:{{ '{{' }}.Destination{{ '}}' }} ({{ '{{' }}.Mode{{ '}}' }}){{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' || echo " None"
        echo "Environment Variables:"
        docker inspect {{ item }} --format '{{ '{{' }}range .Config.Env{{ '}}' }} {{ '{{' }}.{{ '}}' }}{{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' | head -10
        echo "Labels:"
        docker inspect {{ item }} --format '{{ '{{' }}range $key, $value := .Config.Labels{{ '}}' }} {{ '{{' }}$key{{ '}}' }}={{ '{{' }}$value{{ '}}' }}{{ '{{' }}"\n"{{ '}}' }}{{ '{{' }}end{{ '}}' }}' | head -5
        echo "---"
      register: container_inspect
      loop: "{{ container_names.stdout_lines }}"
      when: container_names.stdout_lines | length > 0

    - name: Display container inspection results
      debug:
        # default() guards items from a skipped loop (no stdout attribute)
        msg: "{{ item.stdout | default('') }}"
      loop: "{{ container_inspect.results | default([]) }}"
      when: container_inspect is defined

    # Look for Docker Compose files
    - name: Find docker-compose files
      find:
        paths:
          - /root
          - /home
          - /opt
        patterns:
          - "docker-compose.yml"
          - "docker-compose.yaml"
          - "compose.yml"
          - "compose.yaml"
        recurse: true
        depth: 3
      register: compose_files

    - name: Display found compose files
      debug:
        msg: "📄 Found compose files: {{ item.path }}"
      loop: "{{ compose_files.files }}"
      when: compose_files.files | length > 0

    # Network configuration
    - name: Get Docker networks
      shell: docker network ls
      register: docker_networks

    - name: Display Docker networks
      debug:
        msg: "🌐 Docker Networks: {{ docker_networks.stdout_lines }}"

    # Volume usage
    - name: Get Docker volumes
      shell: docker volume ls
      register: docker_volumes

    - name: Display Docker volumes
      debug:
        msg: "💾 Docker Volumes: {{ docker_volumes.stdout_lines }}"

    # Container resource usage
    - name: Get container resource usage
      shell: docker stats --no-stream
      register: container_stats
      when: container_names.stdout_lines | length > 0

    - name: Display container stats
      debug:
        msg: "📊 Container Resource Usage: {{ container_stats.stdout_lines }}"
      # stdout_lines is absent when the stats task was skipped
      when: container_stats.stdout_lines is defined

    # Swarm suitability analysis
    - name: Generate Swarm suitability analysis
      debug:
        msg: |
          🔍 DOCKER SWARM MIGRATION ANALYSIS FOR {{ inventory_hostname }}
          ================================================================
          📋 SUMMARY:
          - Current Swarm Status: {{ 'Active' if 'active' in swarm_status.stdout else 'Inactive' }}
          - Total Containers: {{ container_names.stdout_lines | length }}
          - Running Containers: {{ (running_containers.stdout_lines | length) - 1 }}
          - Compose Files Found: {{ compose_files.files | length }}
          💡 GENERAL RECOMMENDATIONS:
          ✅ SUITABLE FOR SWARM (typically):
          - Web applications (nginx, apache, etc.)
          - API services
          - Databases (with proper volume management)
          - Monitoring tools (prometheus, grafana, etc.)
          - Load balancers
          ❌ NOT SUITABLE FOR SWARM:
          - Containers using Docker socket (/var/run/docker.sock)
          - Containers with --privileged flag
          - Containers requiring specific host access
          - Development/testing containers
          ⚠️ NEEDS MODIFICATION:
          - Containers using bind mounts (convert to volumes)
          - Containers without restart policies
          - Containers using host networking
          🚀 NEXT STEPS:
          1. Review each container's configuration above
          2. Identify services that can benefit from scaling
          3. Convert suitable containers to Docker services
          4. Set up overlay networks
          5. Configure secrets and configs management
          📝 MIGRATION CHECKLIST:
          □ Initialize Swarm (already done: {{ 'Yes' if 'active' in swarm_status.stdout else 'No' }})
          □ Create overlay networks
          □ Convert containers to services
          □ Set up service discovery
          □ Configure load balancing
          □ Test service scaling
          □ Set up monitoring
      when: container_names is defined

    # Save analysis results
    - name: Save analysis summary
      copy:
        content: |
          Docker Swarm Analysis for {{ inventory_hostname }}
          Generated: {{ ansible_date_time.iso8601 }}
          Current Swarm Status: {{ swarm_status.stdout }}
          Total Containers: {{ container_names.stdout_lines | length }}
          Container List:
          {{ container_names.stdout_lines | join('\n') }}
          Networks:
          {{ docker_networks.stdout }}
          Volumes:
          {{ docker_volumes.stdout }}
          Compose Files Found:
          {% for file in compose_files.files %}
          - {{ file.path }}
          {% endfor %}
        dest: "/tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt"

    - name: Analysis complete
      debug:
        msg: |
          🎉 Analysis complete!
          Results saved to: /tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt
          Review the container details above to determine which services
          are suitable for Swarm migration.

View File

@@ -1,246 +0,0 @@
---
# Deep Docker Swarm migration analysis for ash3c: per-container inspection
# (via jq), suitability assessment, and a generated migration-script template.
- name: Docker Swarm Migration Analysis for ash3c
  hosts: ash3c
  become: true
  gather_facts: true
  vars:
    analysis_results: []
  tasks:
    # Basic checks
    - name: Check if Docker is installed
      command: which docker
      register: docker_installed
      failed_when: false
      changed_when: false

    - name: Fail if Docker not installed
      fail:
        msg: "Docker is not installed on {{ inventory_hostname }}"
      when: docker_installed.rc != 0

    # Current Swarm status (Go-template braces escaped for Jinja2)
    - name: Check Docker Swarm status
      shell: docker info --format "{{ '{{' }}.Swarm.LocalNodeState{{ '}}' }}"
      register: swarm_status

    - name: Display current Swarm status
      debug:
        msg: "🔍 Current Swarm Status: {{ swarm_status.stdout }}"

    # Detailed information for every container
    - name: Get all containers (running and stopped)
      shell: docker ps -a --format "{{ '{{' }}.Names{{ '}}' }}"
      register: all_containers

    # NOTE(review): requires jq on the target host — confirm it is installed.
    - name: Get basic container information
      shell: |
        echo "=== Container: {{ item }} ==="
        docker inspect {{ item }} | jq -r '
        .[0] |
        "Image: " + .Config.Image,
        "Status: " + .State.Status,
        "RestartPolicy: " + .HostConfig.RestartPolicy.Name,
        "NetworkMode: " + .HostConfig.NetworkMode,
        "Ports: " + (.NetworkSettings.Ports | keys | join(", ")),
        "Volumes: " + ([.Mounts[]? | .Source + ":" + .Destination + ":" + .Mode] | join(" ")),
        "Memory: " + (.HostConfig.Memory | tostring),
        "CPUs: " + (.HostConfig.NanoCpus | tostring)
        '
        echo "---"
      register: container_details
      loop: "{{ all_containers.stdout_lines }}"
      when: all_containers.stdout_lines | length > 0

    - name: Display container details
      debug:
        # default() guards items from a skipped loop (no stdout attribute)
        msg: "{{ item.stdout | default('') }}"
      loop: "{{ container_details.results | default([]) }}"
      when: container_details is defined

    # Look for Docker Compose files
    - name: Find docker-compose files
      find:
        paths:
          - /root
          - /home
          - /opt
        patterns:
          - "docker-compose.yml"
          - "docker-compose.yaml"
          - "compose.yml"
          - "compose.yaml"
        recurse: true
      register: compose_files

    - name: Display found compose files
      debug:
        msg: "📄 Found compose files: {{ item.path }}"
      loop: "{{ compose_files.files }}"
      when: compose_files.files | length > 0

    # Network configuration
    - name: Get Docker networks
      shell: docker network ls --format "{{ '{{' }}.Name{{ '}}' }}\t{{ '{{' }}.Driver{{ '}}' }}\t{{ '{{' }}.Scope{{ '}}' }}"
      register: docker_networks

    - name: Display Docker networks
      debug:
        msg: "🌐 Docker Networks: {{ docker_networks.stdout_lines }}"

    # Volume usage
    - name: Get Docker volumes
      shell: docker volume ls --format "{{ '{{' }}.Name{{ '}}' }}\t{{ '{{' }}.Driver{{ '}}' }}"
      register: docker_volumes

    - name: Display Docker volumes
      debug:
        msg: "💾 Docker Volumes: {{ docker_volumes.stdout_lines }}"

    # Container resource usage
    - name: Get container resource usage
      shell: docker stats --no-stream --format "{{ '{{' }}.Name{{ '}}' }}\t{{ '{{' }}.CPUPerc{{ '}}' }}\t{{ '{{' }}.MemUsage{{ '}}' }}\t{{ '{{' }}.NetIO{{ '}}' }}\t{{ '{{' }}.BlockIO{{ '}}' }}"
      register: container_stats
      when: all_containers.stdout_lines | length > 0

    - name: Display container stats
      debug:
        msg: "📊 Container Resource Usage: {{ container_stats.stdout_lines }}"
      # stdout_lines is absent when the stats task was skipped
      when: container_stats.stdout_lines is defined

    # Analyze service types and Swarm suitability
    - name: Analyze containers for Swarm suitability
      set_fact:
        swarm_analysis: |
          🔍 SWARM MIGRATION ANALYSIS FOR {{ inventory_hostname }}
          ================================================
          Current Swarm Status: {{ swarm_status.stdout }}
          Total Containers: {{ all_containers.stdout_lines | length }}
          📋 CONTAINER ANALYSIS:
          {% for container in container_details.results %}
          Container: {{ container.item }}
          {% set details = container.stdout.split('\n') %}
          {% for line in details %}
          {{ line }}
          {% endfor %}
          SWARM SUITABILITY ASSESSMENT:
          {% if 'restart=always' in container.stdout or 'restart=unless-stopped' in container.stdout %}
          ✅ Good restart policy for Swarm
          {% else %}
          ⚠️ Consider adding restart policy
          {% endif %}
          {% if 'NetworkMode: bridge' in container.stdout or 'NetworkMode: host' in container.stdout %}
          ⚠️ May need network configuration for Swarm
          {% else %}
          ✅ Custom network - good for Swarm
          {% endif %}
          {% if '/var/run/docker.sock' in container.stdout %}
          ❌ Uses Docker socket - NOT suitable for Swarm
          {% elif 'bind' in container.stdout %}
          ⚠️ Uses bind mounts - consider using volumes
          {% else %}
          ✅ Good volume configuration
          {% endif %}
          {% endfor %}
          💡 RECOMMENDATIONS:
          SUITABLE FOR SWARM:
          {% for container in container_details.results %}
          {% if '/var/run/docker.sock' not in container.stdout %}
          - {{ container.item }}: Ready for Swarm migration
          {% endif %}
          {% endfor %}
          NEEDS MODIFICATION:
          {% for container in container_details.results %}
          {% if '/var/run/docker.sock' in container.stdout %}
          - {{ container.item }}: Uses Docker socket - keep as standalone
          {% elif 'bind' in container.stdout %}
          - {{ container.item }}: Convert bind mounts to volumes
          {% endif %}
          {% endfor %}
          NEXT STEPS:
          1. Initialize Swarm: docker swarm init
          2. Create overlay networks for services
          3. Convert suitable containers to services
          4. Set up service discovery and load balancing
          5. Configure secrets and configs management
      when: container_details is defined

    - name: Display Swarm analysis
      debug:
        msg: "{{ swarm_analysis }}"
      when: swarm_analysis is defined

    # Generate migration-script suggestions
    - name: Generate migration script suggestions
      set_fact:
        migration_script: |
          #!/bin/bash
          # Docker Swarm Migration Script for {{ inventory_hostname }}
          # Generated on {{ ansible_date_time.iso8601 }}
          echo "🚀 Starting Docker Swarm migration..."
          # Initialize Swarm (if not already done)
          if [ "{{ swarm_status.stdout }}" != "active" ]; then
          echo "Initializing Docker Swarm..."
          docker swarm init
          fi
          # Create overlay networks
          echo "Creating overlay networks..."
          docker network create -d overlay --attachable app-network
          # Example service creation (modify as needed)
          {% for container in container_details.results if container_details is defined %}
          {% if '/var/run/docker.sock' not in container.stdout %}
          echo "Converting {{ container.item }} to Swarm service..."
          # docker service create --name {{ container.item }}-svc \
          # --network app-network \
          # --replicas 1 \
          # [ADD_YOUR_SPECIFIC_OPTIONS] \
          # [IMAGE_NAME]
          {% endif %}
          {% endfor %}
          echo "✅ Migration script template generated!"
          echo "Please review and customize before running."
      when: container_details is defined

    - name: Display migration script
      debug:
        msg: "{{ migration_script }}"
      when: migration_script is defined

    # Save analysis results to a file
    - name: Save analysis results to file
      copy:
        content: |
          {{ swarm_analysis }}
          MIGRATION SCRIPT:
          {{ migration_script }}
        dest: "/tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt"
      when: swarm_analysis is defined and migration_script is defined

    - name: Analysis complete
      debug:
        msg: |
          🎉 Analysis complete!
          Results saved to: /tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt
          Summary:
          - Total containers analyzed: {{ all_containers.stdout_lines | length }}
          - Compose files found: {{ compose_files.files | length }}
          - Current Swarm status: {{ swarm_status.stdout }}

View File

@@ -1,236 +0,0 @@
---
# Docker Swarm readiness check for ash3c using simple (unformatted) docker
# output, with a migration analysis saved under /tmp.
- name: Docker Swarm Check for ash3c
  hosts: ash3c
  become: true
  gather_facts: true
  tasks:
    # Basic checks
    - name: Check if Docker is installed
      command: which docker
      register: docker_installed
      failed_when: false
      changed_when: false

    - name: Fail if Docker not installed
      fail:
        msg: "Docker is not installed on {{ inventory_hostname }}"
      when: docker_installed.rc != 0

    # Current Swarm status
    - name: Check Docker Swarm status
      shell: docker info | grep "Swarm:" -A 1
      register: swarm_status

    - name: Display current Swarm status
      debug:
        msg: "🔍 Current Swarm Status: {{ swarm_status.stdout_lines }}"

    # Running containers — simple format
    - name: Get running containers
      shell: docker ps
      register: running_containers

    - name: Display running containers
      debug:
        msg: "🏃 Running Containers:\n{{ running_containers.stdout }}"

    # All containers (including stopped)
    - name: Get all containers
      shell: docker ps -a
      register: all_containers

    - name: Display all containers
      debug:
        msg: "📦 All Containers:\n{{ all_containers.stdout }}"

    # Container name list
    - name: Get container names
      shell: docker ps -a | awk 'NR>1 {print $NF}' | head -20
      register: container_names

    - name: Display container names
      debug:
        msg: "Container names: {{ container_names.stdout_lines }}"

    # Basic information for each container
    # NOTE(review): requires jq on the target host — confirm it is installed.
    - name: Get basic container info
      shell: |
        echo "=== Container: {{ item }} ==="
        docker inspect {{ item }} | jq -r '.[0] | {
        "Image": .Config.Image,
        "Status": .State.Status,
        "RestartPolicy": .HostConfig.RestartPolicy.Name,
        "NetworkMode": .HostConfig.NetworkMode
        }'
        echo "Ports:"
        docker port {{ item }} 2>/dev/null || echo "No published ports"
        echo "Mounts:"
        docker inspect {{ item }} | jq -r '.[0].Mounts[]? | " \(.Source):\(.Destination) (\(.Mode))"'
        echo "---"
      register: container_info
      loop: "{{ container_names.stdout_lines[:10] }}"  # limit to the first 10 containers
      when: container_names.stdout_lines | length > 0

    - name: Display container info
      debug:
        # default() guards items from a skipped loop (no stdout attribute)
        msg: "{{ item.stdout | default('') }}"
      loop: "{{ container_info.results | default([]) }}"
      when: container_info is defined

    # Look for Docker Compose files
    - name: Find docker-compose files in common locations
      find:
        paths:
          - /root
          - /home
          - /opt
          - /var/lib/docker
        patterns:
          - "docker-compose.yml"
          - "docker-compose.yaml"
          - "compose.yml"
          - "compose.yaml"
        recurse: true
        depth: 3
      register: compose_files
      ignore_errors: true

    - name: Display found compose files
      debug:
        msg: "📄 Found compose files: {{ compose_files.files | map(attribute='path') | list }}"
      when: compose_files.files | length > 0

    # Network configuration
    - name: Get Docker networks
      shell: docker network ls
      register: docker_networks

    - name: Display Docker networks
      debug:
        msg: "🌐 Docker Networks:\n{{ docker_networks.stdout }}"

    # Volume usage
    - name: Get Docker volumes
      shell: docker volume ls
      register: docker_volumes

    - name: Display Docker volumes
      debug:
        msg: "💾 Docker Volumes:\n{{ docker_volumes.stdout }}"

    # Container resource usage
    - name: Get container resource usage
      shell: docker stats --no-stream
      register: container_stats
      when: container_names.stdout_lines | length > 0

    - name: Display container stats
      debug:
        msg: "📊 Container Resource Usage:\n{{ container_stats.stdout }}"
      # stdout is absent when the stats task was skipped
      when: container_stats.stdout is defined

    # Docker images
    - name: Get Docker images
      shell: docker images
      register: docker_images

    - name: Display Docker images
      debug:
        msg: "🖼️ Docker Images:\n{{ docker_images.stdout }}"

    # Swarm suitability analysis
    - name: Generate Swarm suitability analysis
      debug:
        msg: |
          🔍 DOCKER SWARM MIGRATION ANALYSIS FOR {{ inventory_hostname }}
          ================================================================
          📋 SUMMARY:
          - Current Swarm Status: {{ 'Active' if 'active' in swarm_status.stdout else 'Inactive' }}
          - Total Containers: {{ container_names.stdout_lines | length }}
          - Running Containers: {{ running_containers.stdout_lines | length - 1 }}
          - Compose Files Found: {{ compose_files.files | length if compose_files.files is defined else 0 }}
          💡 SWARM MIGRATION RECOMMENDATIONS:
          ✅ TYPICALLY SUITABLE FOR SWARM:
          - Web servers (nginx, apache, caddy)
          - API services and microservices
          - Application servers
          - Load balancers (traefik, haproxy)
          - Monitoring tools (prometheus, grafana)
          - Databases (with proper volume strategy)
          ❌ NOT SUITABLE FOR SWARM:
          - Containers using Docker socket (/var/run/docker.sock)
          - Containers with --privileged flag
          - Development/testing containers
          - Containers requiring specific host hardware access
          ⚠️ NEEDS MODIFICATION FOR SWARM:
          - Containers using bind mounts → convert to volumes
          - Containers without restart policies → add restart policies
          - Containers using host networking → use overlay networks
          - Containers with hardcoded IPs → use service discovery
          🚀 MIGRATION STEPS:
          1. ✅ Swarm is already initialized
          2. Create overlay networks for service communication
          3. Convert suitable containers to Docker services
          4. Set up service discovery and load balancing
          5. Configure secrets and configs management
          6. Test service scaling and failover
          📝 NEXT ACTIONS:
          - Review each container above for Swarm suitability
          - Identify services that would benefit from scaling
          - Plan network topology for services
          - Prepare volume migration strategy
      when: container_names is defined

    # Save analysis results
    - name: Save analysis summary to file
      copy:
        content: |
          Docker Swarm Analysis for {{ inventory_hostname }}
          Generated: {{ ansible_date_time.iso8601 }}
          SWARM STATUS:
          {{ swarm_status.stdout }}
          CONTAINERS ({{ container_names.stdout_lines | length }} total):
          {{ container_names.stdout_lines | join('\n') }}
          NETWORKS:
          {{ docker_networks.stdout }}
          VOLUMES:
          {{ docker_volumes.stdout }}
          IMAGES:
          {{ docker_images.stdout }}
          {% if compose_files.files is defined and compose_files.files | length > 0 %}
          COMPOSE FILES FOUND:
          {% for file in compose_files.files %}
          - {{ file.path }}
          {% endfor %}
          {% endif %}
        dest: "/tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt"

    - name: Analysis complete
      debug:
        msg: |
          🎉 ANALYSIS COMPLETE!
          📄 Results saved to: /tmp/swarm-analysis-{{ inventory_hostname }}-{{ ansible_date_time.epoch }}.txt
          🔍 Review the container details above to identify:
          - Which services are suitable for Swarm
          - Which containers need modification
          - Migration priority and strategy
          💡 TIP: Focus on stateless services first for easier migration!

View File

@@ -1,95 +0,0 @@
---
# Gitea runner status report plus tag-driven service management
# (run with --tags start|stop|restart|reload).
- name: Gitea Runner Management
  hosts: hcp
  become: true
  vars:
    gitea_runner_user: "gitea-runner"
    gitea_runner_data_dir: "/var/lib/gitea-runner"
    gitea_runner_log_dir: "/var/log/gitea-runner"
  tasks:
    - name: Check gitea-runner service status
      systemd:
        name: gitea-runner
      register: service_status

    - name: Display service status
      debug:
        msg: |
          Service: {{ service_status.status.ActiveState }}
          Enabled: {{ service_status.status.UnitFileState }}
          Main PID: {{ service_status.status.MainPID | default('N/A') }}

    - name: Show recent logs
      command: journalctl -u gitea-runner --no-pager -n 20
      register: recent_logs
      changed_when: false

    - name: Display recent logs
      debug:
        var: recent_logs.stdout_lines

    - name: Check runner registration
      stat:
        path: "{{ gitea_runner_data_dir }}/.runner"
      register: runner_registered

    - name: Display registration status
      debug:
        msg: "Runner registered: {{ runner_registered.stat.exists }}"

    - name: Show runner configuration (if registered)
      command: cat {{ gitea_runner_data_dir }}/.runner
      register: runner_config
      become_user: "{{ gitea_runner_user }}"
      when: runner_registered.stat.exists
      changed_when: false

    - name: Display runner configuration
      debug:
        var: runner_config.stdout_lines
      when: runner_registered.stat.exists

    - name: Check Docker access for runner user
      command: docker ps
      become_user: "{{ gitea_runner_user }}"
      register: docker_access
      changed_when: false
      failed_when: false

    - name: Display Docker access status
      debug:
        msg: |
          Docker access: {{ 'OK' if docker_access.rc == 0 else 'FAILED' }}
          {% if docker_access.rc != 0 %}
          Error: {{ docker_access.stderr }}
          {% endif %}

# Separate play for service management. Each task carries the matching tag;
# without a tag the task would never be selected by `--tags`, so the old
# `when`-only version could never fire. The `when` guard still prevents the
# tasks from running on an untagged (full) playbook run.
- name: Service Management Tasks
  hosts: hcp
  become: true
  tasks:
    - name: Start gitea-runner service
      systemd:
        name: gitea-runner
        state: started
      tags: ['start']
      when: ansible_run_tags is defined and 'start' in ansible_run_tags

    - name: Stop gitea-runner service
      systemd:
        name: gitea-runner
        state: stopped
      tags: ['stop']
      when: ansible_run_tags is defined and 'stop' in ansible_run_tags

    - name: Restart gitea-runner service
      systemd:
        name: gitea-runner
        state: restarted
      tags: ['restart']
      when: ansible_run_tags is defined and 'restart' in ansible_run_tags

    - name: Reload gitea-runner service
      systemd:
        name: gitea-runner
        state: reloaded
      tags: ['reload']
      when: ansible_run_tags is defined and 'reload' in ansible_run_tags

View File

@@ -1,157 +0,0 @@
---
# Installs and registers a Gitea Actions runner (act_runner) on the `hcp`
# hosts: creates a dedicated service user, deploys the config / systemd /
# logrotate templates, registers the runner against the Gitea server, and
# starts the service.
- name: Setup Gitea Runner on HCP nodes
  hosts: hcp
  become: yes
  vars:
    # NOTE(review): registration token is stored in plaintext in the repo —
    # move it to Ansible Vault and rotate this value.
    gitea_runner_token: "vOrrQda6Qiet9YOj4waZVU5QgLig2J3rKp2RfoN7"
    # NOTE(review): assumes the hostname `gitea` resolves on the target
    # hosts — verify DNS / /etc/hosts.
    gitea_server_url: "http://gitea:3000"
    gitea_runner_user: "gitea-runner"
    gitea_runner_home: "/home/{{ gitea_runner_user }}"
    gitea_runner_config_dir: "/etc/gitea-runner"
    gitea_runner_data_dir: "/var/lib/gitea-runner"
    gitea_runner_log_dir: "/var/log/gitea-runner"
    gitea_runner_binary: "/usr/bin/act_runner"
tasks:
- name: Check if gitea-runner binary exists
stat:
path: "{{ gitea_runner_binary }}"
register: runner_binary
- name: Fail if act_runner binary not found
fail:
msg: "Act runner binary not found at {{ gitea_runner_binary }}. Please install it first."
when: not runner_binary.stat.exists
- name: Create gitea-runner user
user:
name: "{{ gitea_runner_user }}"
system: yes
shell: /bin/bash
home: "{{ gitea_runner_home }}"
create_home: yes
comment: "Gitea Runner Service User"
- name: Create gitea-runner directories
file:
path: "{{ item }}"
state: directory
owner: "{{ gitea_runner_user }}"
group: "{{ gitea_runner_user }}"
mode: '0755'
loop:
- "{{ gitea_runner_config_dir }}"
- "{{ gitea_runner_data_dir }}"
- "{{ gitea_runner_log_dir }}"
- name: Create gitea-runner configuration file
template:
src: gitea-runner-config.yml.j2
dest: "{{ gitea_runner_config_dir }}/config.yml"
owner: "{{ gitea_runner_user }}"
group: "{{ gitea_runner_user }}"
mode: '0600'
notify: restart gitea-runner
- name: Create gitea-runner systemd service file
template:
src: gitea-runner.service.j2
dest: /etc/systemd/system/gitea-runner.service
owner: root
group: root
mode: '0644'
notify:
- reload systemd
- restart gitea-runner
- name: Create gitea-runner environment file
template:
src: gitea-runner.env.j2
dest: /etc/default/gitea-runner
owner: root
group: root
mode: '0600'
notify: restart gitea-runner
- name: Create runner registration script
template:
src: register-runner.sh.j2
dest: "{{ gitea_runner_home }}/register-runner.sh"
owner: "{{ gitea_runner_user }}"
group: "{{ gitea_runner_user }}"
mode: '0755'
- name: Check if runner is already registered
stat:
path: "{{ gitea_runner_data_dir }}/.runner"
register: runner_registered
- name: Register gitea runner
command: "{{ gitea_runner_home }}/register-runner.sh"
become_user: "{{ gitea_runner_user }}"
when: not runner_registered.stat.exists
register: registration_result
- name: Display registration result
debug:
var: registration_result.stdout_lines
when: registration_result is defined and registration_result.stdout_lines is defined
- name: Create runner startup script
template:
src: start-runner.sh.j2
dest: "{{ gitea_runner_home }}/start-runner.sh"
owner: "{{ gitea_runner_user }}"
group: "{{ gitea_runner_user }}"
mode: '0755'
- name: Create logrotate configuration for gitea-runner
template:
src: gitea-runner.logrotate.j2
dest: /etc/logrotate.d/gitea-runner
owner: root
group: root
mode: '0644'
    # The runner executes jobs inside containers, so a Docker engine is
    # required on every runner host.
    # NOTE(review): `docker.io` is the Debian/Ubuntu package name — this
    # task will fail on RHEL-family hosts; verify target distro.
    - name: Install Docker (required for runner)
      package:
        name: docker.io
        state: present
    # Membership in the `docker` group is effectively root-equivalent on
    # the host; accepted here because the runner must launch containers.
    - name: Add gitea-runner user to docker group
      user:
        name: "{{ gitea_runner_user }}"
        groups: docker
        append: yes
- name: Start and enable Docker service
systemd:
name: docker
state: started
enabled: yes
- name: Start and enable gitea-runner service
systemd:
name: gitea-runner
state: started
enabled: yes
daemon_reload: yes
- name: Check gitea-runner service status
systemd:
name: gitea-runner
register: service_status
- name: Display service status
debug:
msg: "Gitea Runner service is {{ service_status.status.ActiveState }}"
handlers:
- name: reload systemd
systemd:
daemon_reload: yes
- name: restart gitea-runner
systemd:
name: gitea-runner
state: restarted

View File

@@ -1,194 +0,0 @@
---
- name: Docker Swarm Migration Plan for ash3c
hosts: ash3c
become: yes
gather_facts: yes
vars:
# 定义服务迁移计划
swarm_services:
high_priority:
- name: ghproxy
image: wjqserver/ghproxy:latest
ports: "8046:8080"
replicas: 2
networks: ["app-network"]
- name: redis
image: redis:latest
ports: "63789:6379"
replicas: 1
networks: ["app-network"]
volumes: ["redis-data:/data"]
medium_priority:
- name: consul
image: bitnami/consul:latest
ports:
- "8310:8300"
- "8311:8301"
- "8312:8302"
- "8501:8500"
- "8601:8600/udp"
replicas: 1
networks: ["consul-network"]
- name: discourse-app
image: bitnami/discourse:3.4.1
ports: "31080:3000"
replicas: 1
networks: ["app-network"]
depends_on: ["postgres", "redis"]
- name: discourse-sidekiq
image: bitnami/discourse:3.4.1
replicas: 1
networks: ["app-network"]
depends_on: ["postgres", "redis"]
low_priority:
- name: elasticsearch
image: bitnami/elasticsearch:8.17.2
ports: "59200:9200"
replicas: 1
networks: ["elastic-network"]
volumes: ["elastic-data:/bitnami/elasticsearch/data"]
constraints: ["node.role==manager"]
- name: postgres
image: postgres:17.2
ports: "54322:5432"
replicas: 1
networks: ["db-network"]
volumes: ["postgres-data:/var/lib/postgresql/data"]
constraints: ["node.role==manager"]
secrets: ["postgres_password"]
tasks:
- name: Display migration plan
debug:
msg: |
🚀 DOCKER SWARM MIGRATION PLAN FOR {{ inventory_hostname }}
=========================================================
📋 PHASE 1 - HIGH PRIORITY (Low Risk)
{% for service in swarm_services.high_priority %}
✅ {{ service.name }}:
- Image: {{ service.image }}
- Replicas: {{ service.replicas }}
- Networks: {{ service.networks | join(', ') }}
- Migration: Safe, stateless service
{% endfor %}
📋 PHASE 2 - MEDIUM PRIORITY (Medium Risk)
{% for service in swarm_services.medium_priority %}
⚠️ {{ service.name }}:
- Image: {{ service.image }}
- Replicas: {{ service.replicas }}
- Networks: {{ service.networks | join(', ') }}
- Migration: Requires coordination
{% endfor %}
📋 PHASE 3 - LOW PRIORITY (High Risk)
{% for service in swarm_services.low_priority %}
🔴 {{ service.name }}:
- Image: {{ service.image }}
- Replicas: {{ service.replicas }}
- Networks: {{ service.networks | join(', ') }}
- Migration: Requires careful planning
{% endfor %}
- name: Create migration script
copy:
content: |
#!/bin/bash
# Docker Swarm Migration Script for {{ inventory_hostname }}
# Generated: {{ ansible_date_time.iso8601 }}
set -e
echo "🚀 Starting Docker Swarm Migration..."
# Create networks
echo "📡 Creating overlay networks..."
docker network create -d overlay --attachable app-network || true
docker network create -d overlay --attachable db-network || true
docker network create -d overlay --attachable consul-network || true
docker network create -d overlay --attachable elastic-network || true
# Create volumes
echo "💾 Creating volumes..."
docker volume create redis-data || true
docker volume create postgres-data || true
docker volume create elastic-data || true
# Create secrets (example)
echo "🔐 Creating secrets..."
echo "your_postgres_password" | docker secret create postgres_password - || true
echo "✅ Infrastructure setup complete!"
echo ""
echo "🔄 PHASE 1 - Migrate high priority services:"
echo "docker service create --name ghproxy-svc --replicas 2 --network app-network -p 8046:8080 wjqserver/ghproxy:latest"
echo "docker service create --name redis-svc --replicas 1 --network app-network -p 63789:6379 --mount type=volume,source=redis-data,target=/data redis:latest"
echo ""
echo "🔄 PHASE 2 - Migrate medium priority services:"
echo "docker service create --name consul-svc --replicas 1 --network consul-network -p 8310:8300 -p 8311:8301 -p 8312:8302 -p 8501:8500 -p 8601:8600/udp bitnami/consul:latest"
echo "docker service create --name discourse-app-svc --replicas 1 --network app-network -p 31080:3000 bitnami/discourse:3.4.1"
echo "docker service create --name discourse-sidekiq-svc --replicas 1 --network app-network bitnami/discourse:3.4.1"
echo ""
echo "🔄 PHASE 3 - Migrate low priority services (CAREFUL!):"
echo "docker service create --name postgres-svc --replicas 1 --network db-network -p 54322:5432 --mount type=volume,source=postgres-data,target=/var/lib/postgresql/data --secret postgres_password --constraint 'node.role==manager' postgres:17.2"
echo "docker service create --name elasticsearch-svc --replicas 1 --network elastic-network -p 59200:9200 --mount type=volume,source=elastic-data,target=/bitnami/elasticsearch/data --constraint 'node.role==manager' bitnami/elasticsearch:8.17.2"
echo ""
echo "📊 Monitor services:"
echo "docker service ls"
echo "docker service ps <service-name>"
echo ""
echo "⚠️ IMPORTANT NOTES:"
echo "1. Stop original containers before creating services"
echo "2. Backup data before migrating databases"
echo "3. Test each phase before proceeding"
echo "4. Monitor logs: docker service logs <service-name>"
dest: "/tmp/swarm-migration-{{ inventory_hostname }}.sh"
mode: '0755'
- name: Create rollback script
copy:
content: |
#!/bin/bash
# Docker Swarm Rollback Script for {{ inventory_hostname }}
echo "🔄 Rolling back Swarm services..."
# Remove services
docker service rm ghproxy-svc redis-svc consul-svc discourse-app-svc discourse-sidekiq-svc postgres-svc elasticsearch-svc 2>/dev/null || true
# Remove networks (optional)
# docker network rm app-network db-network consul-network elastic-network 2>/dev/null || true
echo "✅ Rollback complete. Original containers should be restarted manually."
dest: "/tmp/swarm-rollback-{{ inventory_hostname }}.sh"
mode: '0755'
- name: Migration plan complete
debug:
msg: |
🎉 MIGRATION PLAN GENERATED!
📄 Files created:
- /tmp/swarm-migration-{{ inventory_hostname }}.sh (Migration script)
- /tmp/swarm-rollback-{{ inventory_hostname }}.sh (Rollback script)
🚀 RECOMMENDED APPROACH:
1. Backup all data first
2. Test migration in phases
3. Start with Phase 1 (low risk services)
4. Monitor each service before proceeding
5. Keep rollback script ready
💡 NEXT STEPS:
1. Review and customize the migration script
2. Plan maintenance window
3. Execute phase by phase
4. Monitor and validate each service

View File

@@ -1,50 +0,0 @@
# Gitea Runner Configuration (Jinja2 template rendered by Ansible)
# All templated scalar values are quoted so that whatever the variables
# expand to, YAML still parses them as strings.
log:
  level: info
  file: "{{ gitea_runner_log_dir }}/runner.log"
runner:
  # Runner name (will be auto-generated if not specified)
  name: "{{ inventory_hostname }}-runner"
  # Runner capacity (number of concurrent jobs)
  capacity: 2
  # Runner timeout
  # NOTE(review): some act_runner versions parse this as a Go duration
  # string (e.g. "3h") — confirm a bare number of seconds is accepted.
  timeout: 3600
  # Runner labels (for job targeting)
  labels:
    - "ubuntu-latest:docker://ubuntu:22.04"
    - "ubuntu-20.04:docker://ubuntu:20.04"
    - "ubuntu-18.04:docker://ubuntu:18.04"
    - "node:docker://node:18"
    - "python:docker://python:3.11"
    - "ansible:docker://quay.io/ansible/ansible-runner:latest"
    - "opentofu:docker://opentofu/opentofu:latest"
cache:
  enabled: true
  dir: "{{ gitea_runner_data_dir }}/cache"
  host: ""
  port: 0
container:
  # Docker network for runner containers
  network: "gitea-runner"
  # Enable privileged containers (needed for Docker-in-Docker)
  privileged: false
  # Container options
  options: "--rm --pull=always"
  # Volumes that job containers are allowed to mount
  valid_volumes:
    - "/tmp"
    - "{{ gitea_runner_data_dir }}"
  docker_host: "unix:///var/run/docker.sock"
host:
  workdir_parent: "{{ gitea_runner_data_dir }}/work"

View File

@@ -1,18 +0,0 @@
# Gitea Runner Environment Variables
# Gitea server configuration
GITEA_INSTANCE_URL={{ gitea_server_url }}
GITEA_RUNNER_REGISTRATION_TOKEN={{ gitea_runner_token }}
# Runner configuration
GITEA_RUNNER_NAME={{ inventory_hostname }}-runner
GITEA_RUNNER_LABELS=ubuntu-latest,ubuntu-20.04,ubuntu-18.04,node,python,ansible,opentofu
# Docker configuration
DOCKER_HOST=unix:///var/run/docker.sock
# Logging
GITEA_RUNNER_LOG_LEVEL=info
# Security
GITEA_RUNNER_SECURITY_PRIVILEGED=false

View File

@@ -1,12 +0,0 @@
# Rotate gitea-runner logs daily, keeping 30 compressed rotations.
{{ gitea_runner_log_dir }}/*.log {
    daily
    missingok
    rotate 30
    compress
    # Keep the newest rotation uncompressed so in-flight writes finish.
    delaycompress
    notifempty
    # NOTE(review): 644 leaves logs world-readable — consider 640.
    create 644 {{ gitea_runner_user }} {{ gitea_runner_user }}
    postrotate
        systemctl reload gitea-runner || true
    endscript
}

View File

@@ -1,39 +0,0 @@
[Unit]
Description=Gitea Actions Runner
Documentation=https://docs.gitea.io/en-us/actions/
# Jobs run in containers, so the Docker daemon should come up first.
After=network.target docker.service
Wants=docker.service
[Service]
Type=simple
User={{ gitea_runner_user }}
Group={{ gitea_runner_user }}
WorkingDirectory={{ gitea_runner_data_dir }}
ExecStart={{ gitea_runner_binary }} daemon --config {{ gitea_runner_config_dir }}/config.yml
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
TimeoutStopSec=5
# Restart forever, 10s apart; interval 0 disables restart rate limiting.
# NOTE(review): StartLimitInterval is the legacy spelling — newer systemd
# expects StartLimitIntervalSec in the [Unit] section; verify on target.
Restart=always
RestartSec=10
StartLimitInterval=0
# Security settings
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=yes
# docker.sock must stay writable so the runner can launch job containers
# despite ProtectSystem=strict.
ReadWritePaths={{ gitea_runner_data_dir }} {{ gitea_runner_log_dir }} /var/run/docker.sock
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
# Environment
EnvironmentFile=-/etc/default/gitea-runner
# Logging
StandardOutput=append:{{ gitea_runner_log_dir }}/gitea-runner.log
StandardError=append:{{ gitea_runner_log_dir }}/gitea-runner-error.log
[Install]
WantedBy=multi-user.target

View File

@@ -1,46 +0,0 @@
#!/bin/bash
# Gitea Runner Registration Script
set -e
echo "🚀 注册 Gitea Runner..."
# 配置变量
GITEA_URL="{{ gitea_server_url }}"
REGISTRATION_TOKEN="{{ gitea_runner_token }}"
RUNNER_NAME="{{ inventory_hostname }}-runner"
RUNNER_LABELS="ubuntu-latest,ubuntu-20.04,ubuntu-18.04,node,python,ansible,opentofu"
# 切换到数据目录
cd {{ gitea_runner_data_dir }}
# 检查是否已经注册
if [ -f ".runner" ]; then
echo "✅ Runner 已经注册"
exit 0
fi
echo "📝 注册 Runner: $RUNNER_NAME"
echo "🔗 Gitea URL: $GITEA_URL"
echo "🏷️ Labels: $RUNNER_LABELS"
# 注册 Runner
{{ gitea_runner_binary }} register \
--instance "$GITEA_URL" \
--token "$REGISTRATION_TOKEN" \
--name "$RUNNER_NAME" \
--labels "$RUNNER_LABELS"
if [ $? -eq 0 ]; then
echo "✅ Runner 注册成功!"
# 设置文件权限
chown {{ gitea_runner_user }}:{{ gitea_runner_user }} .runner .credentials
chmod 600 .runner .credentials
echo "📋 Runner 信息:"
cat .runner
else
echo "❌ Runner 注册失败"
exit 1
fi

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# Gitea Runner startup wrapper: refuses to start until registration exists,
# then replaces itself with the runner daemon.
set -e

echo "🚀 启动 Gitea Runner..."

# The daemon keeps its state files relative to the data directory.
cd {{ gitea_runner_data_dir }}

# `.runner` is produced by the registration script; without it the daemon
# cannot authenticate against Gitea.
[ -f ".runner" ] || {
    echo "❌ Runner 未注册,请先运行注册脚本"
    exit 1
}

echo "✅ Runner 已注册,启动守护进程..."

# exec: hand our PID to the daemon (plays well with service supervision).
exec {{ gitea_runner_binary }} daemon --config {{ gitea_runner_config_dir }}/config.yml

30
configuration/proxy.env Normal file
View File

@@ -0,0 +1,30 @@
# Proxy Configuration for istoreos.tailnet-68f9.ts.net:1082
# This file contains proxy environment variables for the management system
# HTTP/HTTPS Proxy Settings
export http_proxy=http://istoreos.tailnet-68f9.ts.net:1082
export https_proxy=http://istoreos.tailnet-68f9.ts.net:1082
export HTTP_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
export HTTPS_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
# No Proxy Settings (local networks and services)
export no_proxy=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
export NO_PROXY=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
# Additional proxy settings for various tools
export ALL_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
export all_proxy=http://istoreos.tailnet-68f9.ts.net:1082
# Docker proxy settings
export DOCKER_BUILDKIT=1
export BUILDKIT_PROGRESS=plain
# Git proxy settings
export GIT_HTTP_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
export GIT_HTTPS_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
# Curl proxy settings
export CURL_PROXY=http://istoreos.tailnet-68f9.ts.net:1082
# Wget proxy settings
export WGET_PROXY=http://istoreos.tailnet-68f9.ts.net:1082

View File

@@ -1,50 +0,0 @@
# Gitea Runner Configuration (Jinja2 template rendered by Ansible)
# All templated scalar values are quoted so that whatever the variables
# expand to, YAML still parses them as strings.
log:
  level: info
  file: "{{ gitea_runner_log_dir }}/runner.log"
runner:
  # Runner name (will be auto-generated if not specified)
  name: "{{ inventory_hostname }}-runner"
  # Runner capacity (number of concurrent jobs)
  capacity: 2
  # Runner timeout
  # NOTE(review): some act_runner versions parse this as a Go duration
  # string (e.g. "3h") — confirm a bare number of seconds is accepted.
  timeout: 3600
  # Runner labels (for job targeting)
  labels:
    - "ubuntu-latest:docker://ubuntu:22.04"
    - "ubuntu-20.04:docker://ubuntu:20.04"
    - "ubuntu-18.04:docker://ubuntu:18.04"
    - "node:docker://node:18"
    - "python:docker://python:3.11"
    - "ansible:docker://quay.io/ansible/ansible-runner:latest"
    - "opentofu:docker://opentofu/opentofu:latest"
cache:
  enabled: true
  dir: "{{ gitea_runner_data_dir }}/cache"
  host: ""
  port: 0
container:
  # Docker network for runner containers
  network: "gitea-runner"
  # Enable privileged containers (needed for Docker-in-Docker)
  privileged: false
  # Container options
  options: "--rm --pull=always"
  # Volumes that job containers are allowed to mount
  valid_volumes:
    - "/tmp"
    - "{{ gitea_runner_data_dir }}"
  docker_host: "unix:///var/run/docker.sock"
host:
  workdir_parent: "{{ gitea_runner_data_dir }}/work"

View File

@@ -1,18 +0,0 @@
# Gitea Runner Environment Variables
# Gitea server configuration
GITEA_INSTANCE_URL={{ gitea_server_url }}
GITEA_RUNNER_REGISTRATION_TOKEN={{ gitea_runner_token }}
# Runner configuration
GITEA_RUNNER_NAME={{ inventory_hostname }}-runner
GITEA_RUNNER_LABELS=ubuntu-latest,ubuntu-20.04,ubuntu-18.04,node,python,ansible,opentofu
# Docker configuration
DOCKER_HOST=unix:///var/run/docker.sock
# Logging
GITEA_RUNNER_LOG_LEVEL=info
# Security
GITEA_RUNNER_SECURITY_PRIVILEGED=false

View File

@@ -1,12 +0,0 @@
# Rotate gitea-runner logs daily, keeping 30 compressed rotations.
{{ gitea_runner_log_dir }}/*.log {
    daily
    missingok
    rotate 30
    compress
    # Keep the newest rotation uncompressed so in-flight writes finish.
    delaycompress
    notifempty
    # NOTE(review): 644 leaves logs world-readable — consider 640.
    create 644 {{ gitea_runner_user }} {{ gitea_runner_user }}
    postrotate
        systemctl reload gitea-runner || true
    endscript
}

View File

@@ -1,39 +0,0 @@
[Unit]
Description=Gitea Actions Runner
Documentation=https://docs.gitea.io/en-us/actions/
# Jobs run in containers, so the Docker daemon should come up first.
After=network.target docker.service
Wants=docker.service
[Service]
Type=simple
User={{ gitea_runner_user }}
Group={{ gitea_runner_user }}
WorkingDirectory={{ gitea_runner_data_dir }}
ExecStart={{ gitea_runner_binary }} daemon --config {{ gitea_runner_config_dir }}/config.yml
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
TimeoutStopSec=5
# Restart forever, 10s apart; interval 0 disables restart rate limiting.
# NOTE(review): StartLimitInterval is the legacy spelling — newer systemd
# expects StartLimitIntervalSec in the [Unit] section; verify on target.
Restart=always
RestartSec=10
StartLimitInterval=0
# Security settings
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=yes
# docker.sock must stay writable so the runner can launch job containers
# despite ProtectSystem=strict.
ReadWritePaths={{ gitea_runner_data_dir }} {{ gitea_runner_log_dir }} /var/run/docker.sock
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
# Environment
EnvironmentFile=-/etc/default/gitea-runner
# Logging
StandardOutput=append:{{ gitea_runner_log_dir }}/gitea-runner.log
StandardError=append:{{ gitea_runner_log_dir }}/gitea-runner-error.log
[Install]
WantedBy=multi-user.target

View File

@@ -1,46 +0,0 @@
#!/bin/bash
# Gitea Runner Registration Script
# Registers this host's act_runner against the Gitea instance. Idempotent:
# exits early if the .runner credential file already exists.
set -e

echo "🚀 注册 Gitea Runner..."

# 配置变量
GITEA_URL="{{ gitea_server_url }}"
REGISTRATION_TOKEN="{{ gitea_runner_token }}"
RUNNER_NAME="{{ inventory_hostname }}-runner"
RUNNER_LABELS="ubuntu-latest,ubuntu-20.04,ubuntu-18.04,node,python,ansible,opentofu"

# 切换到数据目录
cd {{ gitea_runner_data_dir }}

# 检查是否已经注册
if [ -f ".runner" ]; then
    echo "✅ Runner 已经注册"
    exit 0
fi

echo "📝 注册 Runner: $RUNNER_NAME"
echo "🔗 Gitea URL: $GITEA_URL"
echo "🏷️ Labels: $RUNNER_LABELS"

# 注册 Runner
# BUG FIX: under `set -e` a failing register command aborted the script
# immediately, so the old `if [ $? -eq 0 ]` test was always true and the
# error branch was unreachable. Running the command as the `if` condition
# keeps `set -e` from firing and makes the failure path live.
# NOTE(review): act_runner may prompt interactively unless --no-interactive
# is passed — verify against the installed act_runner version.
if {{ gitea_runner_binary }} register \
    --instance "$GITEA_URL" \
    --token "$REGISTRATION_TOKEN" \
    --name "$RUNNER_NAME" \
    --labels "$RUNNER_LABELS"; then
    echo "✅ Runner 注册成功!"
    # 设置文件权限
    chown {{ gitea_runner_user }}:{{ gitea_runner_user }} .runner .credentials
    chmod 600 .runner .credentials
    echo "📋 Runner 信息:"
    cat .runner
else
    echo "❌ Runner 注册失败"
    exit 1
fi

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# Gitea Runner startup wrapper: refuses to start until registration exists,
# then replaces itself with the runner daemon.
set -e

echo "🚀 启动 Gitea Runner..."

# The daemon keeps its state files relative to the data directory.
cd {{ gitea_runner_data_dir }}

# `.runner` is produced by the registration script; without it the daemon
# cannot authenticate against Gitea.
[ -f ".runner" ] || {
    echo "❌ Runner 未注册,请先运行注册脚本"
    exit 1
}

echo "✅ Runner 已注册,启动守护进程..."

# exec: hand our PID to the daemon (plays well with service supervision).
exec {{ gitea_runner_binary }} daemon --config {{ gitea_runner_config_dir }}/config.yml

View File

@@ -1,202 +0,0 @@
# ZSH 配置同步方案
这个目录包含了完整的 oh-my-zsh 配置,可以在多个 VPS 之间同步使用。
## 文件结构
```
configuration/zsh/
├── README.md # 本文件
├── install-zsh-config.sh # 完整安装脚本
├── quick-install.sh # 快速安装脚本
├── zshrc.template # ZSH 配置文件模板
└── oh-my-zsh-custom/ # 自定义 oh-my-zsh 配置
├── aliases.zsh # 自定义别名
└── plugins/ # 自定义插件
```
## 使用方法
### 方法一:智能安装(推荐)
询问用户是否使用代理,安装完成后可选择是否保持:
```bash
# 智能安装(询问代理使用,安装后可选择是否保持)
curl -fsSL https://ben:8d7d70f324796be650b79415303c31f567bf459b@gitea.tailnet-68f9.ts.net/ben/mgmt/raw/branch/main/configuration/zsh/smart-install.sh | bash
```
**特点:**
- 安装前询问是否使用代理
- 测试代理连接确保可用
- 安装完成后询问是否保持代理
- 用户完全控制代理使用
### 方法二:快速安装
在新 VPS 上运行:
```bash
# 一键安装
curl -fsSL https://ben:8d7d70f324796be650b79415303c31f567bf459b@gitea.tailnet-68f9.ts.net/ben/mgmt/raw/branch/main/configuration/zsh/quick-install.sh | bash
```
### 方法三:手动安装
1. 克隆仓库:
```bash
git clone https://ben:8d7d70f324796be650b79415303c31f567bf459b@gitea.tailnet-68f9.ts.net/ben/mgmt.git /root/mgmt
```
2. 运行安装脚本:
```bash
cd /root/mgmt
chmod +x configuration/zsh/install-zsh-config.sh
./configuration/zsh/install-zsh-config.sh
```
## 配置同步
安装完成后,可以使用以下命令同步最新配置:
```bash
# 同步配置
sync-zsh-config
```
这个命令会:
1. 从 Gitea 拉取最新配置
2. 备份当前配置
3. 部署新配置
## 代理管理
如果网络环境需要代理,可以使用以下命令管理代理:
```bash
# 代理管理命令
proxy-on # 临时开启代理
proxy-off # 临时关闭代理
proxy-toggle # 切换代理状态
proxy-enable # 永久开启代理
proxy-disable # 永久关闭代理
proxy-status # 查看代理状态
proxy-test # 测试代理连接
```
### 代理使用场景
- **临时使用**: `proxy-on` → 使用代理 → `proxy-off`
- **永久开启**: `proxy-enable` → 重启后仍然有效
- **快速切换**: `proxy-toggle` → 一键切换状态
- **状态检查**: `proxy-status` → 查看当前状态和IP
## 包含的功能
### 插件
- **git** - Git 集成
- **docker** - Docker 支持
- **ansible** - Ansible 支持
- **terraform** - OpenTofu/Terraform 支持
- **kubectl** - Kubernetes 支持
- **zsh-autosuggestions** - 命令自动建议
- **zsh-syntax-highlighting** - 语法高亮
- **zsh-completions** - 增强补全
### 别名
- **项目管理**: `mgmt-status`, `mgmt-deploy`, `mgmt-cleanup`
- **Ansible**: `ansible-check`, `ansible-deploy`, `ansible-ping`
- **OpenTofu**: `tofu-init`, `tofu-plan`, `tofu-apply`
- **Docker**: `dps`, `dex`, `dlog`, `dclean`
- **Kubernetes**: `k`, `kgp`, `kgs`, `kaf`
- **Git**: `gs`, `ga`, `gc`, `gp`, `gl`
- **系统**: `ll`, `la`, `ports`, `myip`
### 主题
- **agnoster** - 功能丰富的主题,显示 Git 状态
## 更新配置
当您在主 VPS 上更新配置后:
1. 提交更改:
```bash
cd /root/mgmt
git add configuration/zsh/
git commit -m "Update zsh configuration"
git push origin main
```
2. 在其他 VPS 上同步:
```bash
sync-zsh-config
```
## 自定义配置
如果您需要在特定 VPS 上添加自定义配置:
1. 编辑 `~/.zshrc` 文件
2. 在文件末尾添加您的自定义配置
3. 这些配置不会被同步脚本覆盖
## 故障排除
### 如果自动建议插件不工作
```bash
# 运行测试脚本检查插件状态
chmod +x /root/mgmt/configuration/zsh/test-plugins.sh
/root/mgmt/configuration/zsh/test-plugins.sh
# 手动安装缺失的插件
cd ~/.oh-my-zsh/custom/plugins
git clone https://github.com/zsh-users/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git
git clone https://github.com/zsh-users/zsh-completions
# 重新加载配置
source ~/.zshrc
```
### 如果同步失败
```bash
# 检查网络连接
ping gitea.tailnet-68f9.ts.net
# 手动拉取
cd /root/mgmt
git pull origin main
```
### 如果别名不工作
```bash
# 重新加载配置
source ~/.zshrc
# 检查别名
alias | grep <alias-name>
```
### 如果插件不工作
```bash
# 检查插件目录
ls ~/.oh-my-zsh/plugins/
ls ~/.oh-my-zsh/custom/plugins/
# 运行测试脚本
/root/mgmt/configuration/zsh/test-plugins.sh
```
## 安全说明
- 此配置包含访问 Gitea 的凭据:访问令牌直接内嵌在本文档的 curl / git clone URL 中,任何能读到本文档的人都可以使用该令牌
- 请确保只在可信的 VPS 上使用,并避免公开分享本文档或仓库
- 建议定期轮换访问令牌;一旦怀疑泄露,立即在 Gitea 中吊销并重新生成
## 支持
如有问题,请检查:
1. 网络连接是否正常
2. Git 凭据是否正确
3. 依赖包是否已安装
4. 权限是否正确

View File

@@ -1,281 +0,0 @@
#!/bin/bash
# ZSH 配置安装脚本
# 用于在其他 VPS 上安装和同步 oh-my-zsh 配置
set -euo pipefail
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# 日志函数
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# 检查是否为 root 用户
check_root() {
if [[ $EUID -ne 0 ]]; then
log_error "此脚本需要 root 权限运行"
exit 1
fi
}
# 设置代理(如果需要)
# Loads proxy settings from the shared proxy.env, verifies the proxy is
# actually reachable, and falls back to a direct connection otherwise.
setup_proxy() {
    log_info "检查代理设置..."
    # 检查是否已经有代理配置
    if [[ -f "/root/mgmt/configuration/proxy.env" ]]; then
        log_info "发现代理配置文件,加载代理设置..."
        source "/root/mgmt/configuration/proxy.env"
        # 测试代理连接
        if curl -s --connect-timeout 5 --proxy "$http_proxy" https://httpbin.org/ip >/dev/null 2>&1; then
            log_success "代理连接正常,将使用代理下载"
        else
            log_warning "代理连接失败,将使用直连"
            # BUG FIX: proxy.env also exports ALL_PROXY/all_proxy and several
            # tool-specific variables; unsetting only the http(s) variants
            # left curl (via ALL_PROXY) still routing through the dead proxy.
            unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY \
                  ALL_PROXY all_proxy \
                  GIT_HTTP_PROXY GIT_HTTPS_PROXY CURL_PROXY WGET_PROXY
        fi
    else
        log_info "未发现代理配置,将使用直连"
    fi
}
# 安装依赖
install_dependencies() {
log_info "安装依赖包..."
# 更新包列表
apt update
# 安装必要的包
apt install -y \
zsh \
git \
curl \
wget \
htop \
tree \
jq \
tmux \
fonts-powerline \
fontconfig
log_success "依赖包安装完成"
}
# 安装 oh-my-zsh
install_oh_my_zsh() {
log_info "安装 oh-my-zsh..."
if [[ -d "$HOME/.oh-my-zsh" ]]; then
log_warning "oh-my-zsh 已安装,跳过安装步骤"
return 0
fi
# 安装 oh-my-zsh
RUNZSH=no CHSH=no sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
log_success "oh-my-zsh 安装完成"
}
# 安装自定义插件
install_custom_plugins() {
log_info "安装自定义插件..."
local custom_dir="$HOME/.oh-my-zsh/custom/plugins"
# zsh-autosuggestions
if [[ ! -d "$custom_dir/zsh-autosuggestions" ]]; then
log_info "安装 zsh-autosuggestions..."
git clone https://github.com/zsh-users/zsh-autosuggestions "$custom_dir/zsh-autosuggestions"
fi
# zsh-syntax-highlighting
if [[ ! -d "$custom_dir/zsh-syntax-highlighting" ]]; then
log_info "安装 zsh-syntax-highlighting..."
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git "$custom_dir/zsh-syntax-highlighting"
fi
# zsh-completions
if [[ ! -d "$custom_dir/zsh-completions" ]]; then
log_info "安装 zsh-completions..."
git clone https://github.com/zsh-users/zsh-completions "$custom_dir/zsh-completions"
fi
log_success "自定义插件安装完成"
}
# 部署配置文件
deploy_configs() {
log_info "部署配置文件..."
local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# 备份现有配置
if [[ -f "$HOME/.zshrc" ]]; then
log_info "备份现有 .zshrc..."
cp "$HOME/.zshrc" "$HOME/.zshrc.backup.$(date +%Y%m%d_%H%M%S)"
fi
# 部署 .zshrc
if [[ -f "$script_dir/zshrc.template" ]]; then
log_info "部署 .zshrc 配置..."
cp "$script_dir/zshrc.template" "$HOME/.zshrc"
else
log_error "找不到 zshrc.template 文件"
exit 1
fi
# 部署自定义配置
if [[ -d "$script_dir/oh-my-zsh-custom" ]]; then
log_info "部署自定义 oh-my-zsh 配置..."
# 复制自定义别名文件
if [[ -f "$script_dir/oh-my-zsh-custom/aliases.zsh" ]]; then
cp "$script_dir/oh-my-zsh-custom/aliases.zsh" "$HOME/.oh-my-zsh/custom/"
fi
fi
log_success "配置文件部署完成"
}
# 设置默认 shell
set_default_shell() {
log_info "设置 zsh 为默认 shell..."
# 检查 zsh 是否在 /etc/shells 中
if ! grep -q "$(which zsh)" /etc/shells; then
log_info "添加 zsh 到 /etc/shells..."
echo "$(which zsh)" >> /etc/shells
fi
# 设置默认 shell
chsh -s "$(which zsh)"
log_success "默认 shell 设置为 zsh"
}
# 创建同步脚本
create_sync_script() {
log_info "创建同步脚本..."
cat > /usr/local/bin/sync-zsh-config << 'EOF'
#!/bin/bash
# ZSH 配置同步脚本
# 从 Gitea 仓库拉取最新配置
set -euo pipefail
MGMT_DIR="/root/mgmt"
ZSH_CONFIG_DIR="$MGMT_DIR/configuration/zsh"
log_info() {
echo -e "\033[0;34m[INFO]\033[0m $1"
}
log_success() {
echo -e "\033[0;32m[SUCCESS]\033[0m $1"
}
log_error() {
echo -e "\033[0;31m[ERROR]\033[0m $1"
}
# 检查 mgmt 目录是否存在
if [[ ! -d "$MGMT_DIR" ]]; then
log_error "mgmt 目录不存在: $MGMT_DIR"
exit 1
fi
# 进入 mgmt 目录
cd "$MGMT_DIR"
# 拉取最新配置
log_info "拉取最新配置..."
git pull origin main
# 检查 zsh 配置目录
if [[ ! -d "$ZSH_CONFIG_DIR" ]]; then
log_error "zsh 配置目录不存在: $ZSH_CONFIG_DIR"
exit 1
fi
# 备份当前配置
if [[ -f "$HOME/.zshrc" ]]; then
log_info "备份当前配置..."
cp "$HOME/.zshrc" "$HOME/.zshrc.backup.$(date +%Y%m%d_%H%M%S)"
fi
# 部署新配置
log_info "部署新配置..."
cp "$ZSH_CONFIG_DIR/zshrc.template" "$HOME/.zshrc"
# 部署自定义别名
if [[ -f "$ZSH_CONFIG_DIR/oh-my-zsh-custom/aliases.zsh" ]]; then
cp "$ZSH_CONFIG_DIR/oh-my-zsh-custom/aliases.zsh" "$HOME/.oh-my-zsh/custom/"
fi
log_success "ZSH 配置同步完成!"
log_info "请运行 'source ~/.zshrc' 或重新登录以应用新配置"
EOF
chmod +x /usr/local/bin/sync-zsh-config
log_success "同步脚本创建完成: /usr/local/bin/sync-zsh-config"
}
# 显示使用说明
show_usage() {
log_success "ZSH 配置安装完成!"
echo ""
log_info "使用方法:"
echo " 1. 重新登录或运行: source ~/.zshrc"
echo " 2. 同步配置: sync-zsh-config"
echo " 3. 查看别名: alias"
echo ""
log_info "可用命令:"
echo " - mgmt-status, mgmt-deploy, mgmt-cleanup"
echo " - ansible-check, ansible-deploy, ansible-ping"
echo " - tofu-init, tofu-plan, tofu-apply"
echo " - dps, dex, dlog (Docker)"
echo " - k, kgp, kgs (Kubernetes)"
echo ""
}
# 主函数
main() {
log_info "开始安装 ZSH 配置..."
check_root
setup_proxy
install_dependencies
install_oh_my_zsh
install_custom_plugins
deploy_configs
set_default_shell
create_sync_script
show_usage
log_success "安装完成!"
}
# 运行主函数
main "$@"

View File

@@ -1,251 +0,0 @@
# =============================================================================
# CUSTOM ALIASES FOR MANAGEMENT SYSTEM
# =============================================================================
# Project Management
alias mgmt='cd /root/mgmt'
alias mgmt-status='cd /root/mgmt && ./mgmt.sh status'
alias mgmt-deploy='cd /root/mgmt && ./mgmt.sh deploy'
alias mgmt-cleanup='cd /root/mgmt && ./mgmt.sh cleanup'
alias mgmt-swarm='cd /root/mgmt && ./mgmt.sh swarm'
alias mgmt-tofu='cd /root/mgmt && ./mgmt.sh tofu'
# Ansible Management
alias ansible-check='cd /root/mgmt/configuration && ansible-playbook --syntax-check'
alias ansible-deploy='cd /root/mgmt/configuration && ansible-playbook -i inventories/production/inventory.ini'
alias ansible-ping='cd /root/mgmt/configuration && ansible -i inventories/production/inventory.ini all -m ping'
alias ansible-vault='cd /root/mgmt/configuration && ansible-vault'
alias ansible-galaxy='cd /root/mgmt/configuration && ansible-galaxy'
# OpenTofu/Terraform Management
alias tofu-init='cd /root/mgmt/tofu/environments/dev && tofu init'
alias tofu-plan='cd /root/mgmt/tofu/environments/dev && tofu plan -var-file="terraform.tfvars"'
alias tofu-apply='cd /root/mgmt/tofu/environments/dev && tofu apply -var-file="terraform.tfvars"'
alias tofu-destroy='cd /root/mgmt/tofu/environments/dev && tofu destroy -var-file="terraform.tfvars"'
alias tofu-output='cd /root/mgmt/tofu/environments/dev && tofu output'
alias tofu-validate='cd /root/mgmt/tofu/environments/dev && tofu validate'
alias tofu-fmt='cd /root/mgmt/tofu/environments/dev && tofu fmt -recursive'
# Docker Management
alias d='docker'
alias dc='docker-compose'
alias dps='docker ps'
alias dpsa='docker ps -a'
alias di='docker images'
alias dex='docker exec -it'
alias dlog='docker logs -f'
alias dstop='docker stop'
alias dstart='docker start'
alias drm='docker rm'
alias drmi='docker rmi'
alias dclean='docker system prune -f'
alias dbuild='docker build'
alias drun='docker run'
alias dpull='docker pull'
alias dpush='docker push'
# Docker Swarm Management
alias dswarm='docker swarm'
alias dstack='docker stack'
alias dservice='docker service'
alias dnode='docker node'
alias dnetwork='docker network'
alias dsecret='docker secret'
alias dconfig='docker config'
alias dstack-ls='docker stack ls'
alias dstack-rm='docker stack rm'
alias dstack-deploy='docker stack deploy'
alias dservice-ls='docker service ls'
alias dservice-ps='docker service ps'
alias dservice-logs='docker service logs'
# Kubernetes Management
alias k='kubectl'
alias kgp='kubectl get pods'
alias kgs='kubectl get services'
alias kgd='kubectl get deployments'
alias kgn='kubectl get nodes'
alias kgi='kubectl get ingress'
alias kgc='kubectl get configmaps'
# BUG FIX: this alias was also named `kgs`, silently clobbering the
# `kubectl get services` alias above (last definition wins). Renamed to
# `kgsec` so both shortcuts work as documented.
alias kgsec='kubectl get secrets'
alias kdp='kubectl describe pod'
alias kds='kubectl describe service'
alias kdd='kubectl describe deployment'
alias kdn='kubectl describe node'
alias kdi='kubectl describe ingress'
alias kaf='kubectl apply -f'
alias kdf='kubectl delete -f'
alias kl='kubectl logs -f'
alias ke='kubectl edit'
alias kx='kubectl exec -it'
alias kctx='kubectl config current-context'
alias kuse='kubectl config use-context'
# Git Management
alias gs='git status'
alias ga='git add'
alias gc='git commit'
alias gp='git push'
alias gl='git pull'
alias gd='git diff'
alias gb='git branch'
alias gco='git checkout'
alias gcom='git checkout main'
alias gcod='git checkout develop'
alias gst='git stash'
alias gstp='git stash pop'
alias gstl='git stash list'
alias gstc='git stash clear'
alias gcl='git clone'
alias gfe='git fetch'
alias gme='git merge'
alias gr='git rebase'
alias grc='git rebase --continue'
alias gra='git rebase --abort'
alias gres='git reset'
alias gresh='git reset --hard'
alias gress='git reset --soft'
# System Management
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
alias ports='netstat -tuln'
alias myip='curl -s https://httpbin.org/ip | jq -r .origin'
alias speedtest='curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python3'
alias psg='ps aux | grep'
alias top='htop'
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
alias mkdir='mkdir -pv'
# Network Management
alias ping='ping -c 4'
alias traceroute='traceroute -n'
alias nmap='nmap -sS -O'
alias ss='ss -tuln'
# File Operations
alias find='find . -name'
alias locate='locate -i'
alias which='which -a'
alias whereis='whereis -b'
# Text Processing
alias cat='cat -n'
alias less='less -R'
alias more='more -R'
alias head='head -n 20'
alias tail='tail -n 20'
alias wc='wc -l'
# Archive Operations
alias tar='tar -v'
alias zip='zip -r'
alias unzip='unzip -l'
alias gzip='gzip -v'
alias gunzip='gunzip -v'
# Process Management
alias jobs='jobs -l'
alias bg='bg %'
alias fg='fg %'
alias kill='kill -9'
alias pkill='pkill -f'
# Environment
alias env='env | sort'
alias set='set | sort'
alias unset='unset'
alias export='export'
alias source='source'
# History
alias h='history'
alias hg='history | grep'
alias hc='history -c'
# Directory Navigation
alias cd..='cd ..'
alias cd...='cd ../..'
alias cd....='cd ../../..'
alias cd-='cd -'
alias cd~='cd ~'
alias cd/='cd /'
# Quick Access
alias vim='vim'
alias nano='nano'
alias emacs='emacs'
alias code='code'
alias subl='subl'
# Monitoring
alias df='df -h'
alias du='du -h'
alias free='free -h'
alias meminfo='cat /proc/meminfo'
alias cpuinfo='cat /proc/cpuinfo'
alias uptime='uptime -p'
# Security
alias chmod='chmod -v'
alias chown='chown -v'
alias chgrp='chgrp -v'
alias passwd='passwd'
alias su='su -'
alias sudo='sudo -E'
# Development
alias make='make -j$(nproc)'
alias cmake='cmake -DCMAKE_BUILD_TYPE=Release'
alias gcc='gcc -Wall -Wextra'
alias g++='g++ -Wall -Wextra'
alias python='python3'
alias pip='pip3'
alias node='node'
alias npm='npm'
alias yarn='yarn'
# Logs
alias journal='journalctl -f'
alias syslog='tail -f /var/log/syslog'
alias auth='tail -f /var/log/auth.log'
alias kern='tail -f /var/log/kern.log'
alias mail='tail -f /var/log/mail.log'
# Backup
alias backup='tar -czf backup-$(date +%Y%m%d-%H%M%S).tar.gz'
alias restore='tar -xzf'
# Cleanup
alias clean='rm -rf ~/.cache/* ~/.tmp/* /tmp/*'
alias clean-docker='docker system prune -af --volumes'
alias clean-k8s='kubectl delete pods --field-selector=status.phase=Succeeded'
alias clean-ansible='rm -rf ~/.ansible/tmp/*'
# Information
alias info='uname -a'
alias whoami='whoami'
alias id='id'
alias groups='groups'
alias users='users'
alias w='w'
alias who='who'
alias last='last -n 10'
# Proxy Management
alias proxy-on='/root/mgmt/scripts/utilities/proxy-toggle.sh on'
alias proxy-off='/root/mgmt/scripts/utilities/proxy-toggle.sh off'
alias proxy-toggle='/root/mgmt/scripts/utilities/proxy-toggle.sh toggle'
alias proxy-enable='/root/mgmt/scripts/utilities/proxy-toggle.sh enable'
alias proxy-disable='/root/mgmt/scripts/utilities/proxy-toggle.sh disable'
alias proxy-status='/root/mgmt/scripts/utilities/proxy-toggle.sh status'
alias proxy-test='/root/mgmt/scripts/utilities/proxy-toggle.sh test'

View File

@@ -1,12 +0,0 @@
# Put files in this folder to add your own custom functionality.
# See: https://github.com/ohmyzsh/ohmyzsh/wiki/Customization
#
# Files in the custom/ directory will be:
# - loaded automatically by the init script, in alphabetical order
# - loaded last, after all built-ins in the lib/ directory, to override them
# - ignored by git by default
#
# Example: add custom/shortcuts.zsh for shortcuts to your local projects
#
# brainstormr=~/Projects/development/planetargon/brainstormr
# cd $brainstormr

View File

@@ -1,6 +0,0 @@
# Put your custom themes in this folder.
# See: https://github.com/ohmyzsh/ohmyzsh/wiki/Customization#overriding-and-adding-themes
#
# Example:
PROMPT="%{$fg[red]%}%n%{$reset_color%}@%{$fg[blue]%}%m %{$fg[yellow]%}%~ %{$reset_color%}%% "

View File

@@ -1,153 +0,0 @@
#!/bin/bash
# 快速安装脚本 - 从 Gitea 仓库直接安装 ZSH 配置
# 用法: curl -fsSL https://your-gitea.com/ben/mgmt/raw/branch/main/configuration/zsh/quick-install.sh | bash
set -euo pipefail
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Gitea repository info.
# NOTE(review): an access token is embedded in this URL — anyone who can read
# this script can use it. Prefer a git credential helper or a deploy key.
GITEA_URL="https://ben:8d7d70f324796be650b79415303c31f567bf459b@gitea.tailnet-68f9.ts.net/ben/mgmt.git"
MGMT_DIR="/root/mgmt"
log_info "开始快速安装 ZSH 配置..."
# Require root: everything is installed under /root.
if [[ $EUID -ne 0 ]]; then
log_error "此脚本需要 root 权限运行"
exit 1
fi
# Clone the repo on first run; otherwise pull the existing checkout.
if [[ -d "$MGMT_DIR" ]]; then
log_info "更新现有仓库..."
cd "$MGMT_DIR"
git pull origin main
else
log_info "克隆仓库..."
git clone "$GITEA_URL" "$MGMT_DIR"
cd "$MGMT_DIR"
fi
# Interactive proxy choice: Y/Enter = install via proxy, N = direct.
# NOTE(review): when invoked as "curl ... | bash" (the documented usage),
# 'read' takes input from stdin — which is the script text itself; reading
# from /dev/tty would avoid that. TODO confirm intended invocation.
echo ""
log_info "网络环境检测:"
echo " 检测到可能需要代理访问外网资源(如 GitHub"
echo ""
log_info "是否使用代理进行安装?"
echo " Y - 使用代理安装(推荐,确保下载成功)"
echo " N - 直连安装(如果网络环境良好)"
echo ""
while true; do
read -p "请选择 (Y/n): " choice
case $choice in
[Yy]|"")
log_info "选择使用代理安装"
PROXY_URL="http://istoreos.tailnet-68f9.ts.net:1082"
# Verify the proxy can actually reach GitHub before committing to it.
if curl -s --connect-timeout 5 --proxy "$PROXY_URL" https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh >/dev/null 2>&1; then
log_success "代理连接正常,设置代理环境"
export http_proxy="$PROXY_URL"
export https_proxy="$PROXY_URL"
export HTTP_PROXY="$PROXY_URL"
export HTTPS_PROXY="$PROXY_URL"
# Persist the proxy settings; ~/.zshrc sources this file on login.
# (Unquoted heredoc: ${PROXY_URL} expands now, at write time.)
cat > "$MGMT_DIR/configuration/proxy.env" << EOF
# Proxy Configuration for istoreos.tailnet-68f9.ts.net:1082
export http_proxy=${PROXY_URL}
export https_proxy=${PROXY_URL}
export HTTP_PROXY=${PROXY_URL}
export HTTPS_PROXY=${PROXY_URL}
export no_proxy=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
export NO_PROXY=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
export ALL_PROXY=${PROXY_URL}
export all_proxy=${PROXY_URL}
export GIT_HTTP_PROXY=${PROXY_URL}
export GIT_HTTPS_PROXY=${PROXY_URL}
export CURL_PROXY=${PROXY_URL}
export WGET_PROXY=${PROXY_URL}
EOF
else
log_error "代理连接失败,无法继续安装"
exit 1
fi
break
;;
[Nn])
log_info "选择直连安装"
# Same reachability probe, without the proxy.
if curl -s --connect-timeout 5 https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh >/dev/null 2>&1; then
log_success "直连正常,开始安装"
else
log_error "直连失败,无法继续安装"
exit 1
fi
break
;;
*)
# NOTE(review): requires a log_warning helper to be defined earlier in
# this script — confirm it exists, otherwise 'set -e' aborts here.
log_warning "无效选择,请输入 Y 或 N"
;;
esac
done
# Hand off to the full installer from the checked-out repo.
log_info "运行 ZSH 配置安装脚本..."
chmod +x "$MGMT_DIR/configuration/zsh/install-zsh-config.sh"
"$MGMT_DIR/configuration/zsh/install-zsh-config.sh"
log_success "快速安装完成!"
# If a proxy config was written above, ask whether to keep it.
if [[ -f "$MGMT_DIR/configuration/proxy.env" ]]; then
echo ""
log_info "安装完成!代理已临时开启用于安装。"
echo ""
log_info "是否保持代理开启?"
echo " Y - 保持代理开启(推荐,方便访问外网)"
echo " N - 关闭代理(如果不需要访问外网)"
echo ""
while true; do
read -p "请选择 (Y/n): " choice
case $choice in
[Yy]|"")
log_success "代理保持开启"
log_info "使用 'proxy-status' 查看代理状态"
log_info "使用 'proxy-toggle' 切换代理状态"
break
;;
[Nn])
log_info "关闭代理..."
rm -f "$MGMT_DIR/configuration/proxy.env"
log_success "代理已关闭"
break
;;
*)
log_warning "无效选择,请输入 Y 或 N"
;;
esac
done
fi
log_info "请重新登录或运行: source ~/.zshrc"

View File

@@ -1,215 +0,0 @@
#!/bin/bash
# 智能安装脚本 - 自动检测网络环境并设置代理
# 用法: curl -fsSL https://your-gitea.com/ben/mgmt/raw/branch/main/configuration/zsh/smart-install.sh | bash
set -euo pipefail
# 颜色定义
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Gitea repository and proxy endpoint configuration.
# NOTE(review): the access token is embedded in the clone URL; prefer a git
# credential helper over committing secrets to this script.
GITEA_URL="https://ben:8d7d70f324796be650b79415303c31f567bf459b@gitea.tailnet-68f9.ts.net/ben/mgmt.git"
MGMT_DIR="/root/mgmt"
PROXY_HOST="istoreos.tailnet-68f9.ts.net"
PROXY_PORT="1082"
PROXY_URL="http://${PROXY_HOST}:${PROXY_PORT}"
# Require root: everything is installed under /root/mgmt.
if [[ $EUID -ne 0 ]]; then
log_error "此脚本需要 root 权限运行"
exit 1
fi
# Ask the user whether the install should go through the proxy.
# Returns 0 to use the proxy, 1 for a direct connection.
# Loops until the answer is Y/y, N/n, or Enter (defaults to proxy).
ask_proxy_usage() {
    echo ""
    log_info "网络环境检测:"
    echo " 检测到可能需要代理访问外网资源(如 GitHub"
    echo ""
    log_info "是否使用代理进行安装?"
    echo " Y - 使用代理安装(推荐,确保下载成功)"
    echo " N - 直连安装(如果网络环境良好)"
    echo ""
    while true; do
        # FIX: read from the controlling terminal, not stdin. When this script
        # runs as "curl ... | bash" (the documented usage) stdin is the script
        # text itself, so a plain 'read' would consume script lines as the
        # answer. -r keeps backslashes in the reply literal.
        read -r -p "请选择 (Y/n): " choice < /dev/tty
        case $choice in
        [Yy]|"")
            log_info "选择使用代理安装"
            return 0
            ;;
        [Nn])
            log_info "选择直连安装"
            return 1
            ;;
        *)
            log_warning "无效选择,请输入 Y 或 N"
            ;;
        esac
    done
}
# Probe GitHub (raw.githubusercontent.com) through the configured proxy.
# Returns 0 when reachable, 1 otherwise; logs the outcome either way.
test_proxy_connection() {
    log_info "测试代理连接..."
    if ! curl -s --connect-timeout 5 --proxy "$PROXY_URL" https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh >/dev/null 2>&1; then
        log_error "代理连接失败"
        return 1
    fi
    log_success "代理连接正常"
    return 0
}
# Probe GitHub directly (no proxy).
# Returns 0 when reachable, 1 otherwise; logs the outcome either way.
test_direct_connection() {
    log_info "测试直连..."
    if ! curl -s --connect-timeout 5 https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh >/dev/null 2>&1; then
        log_error "直连失败"
        return 1
    fi
    log_success "直连正常"
    return 0
}
# Export the proxy variables (lower- and upper-case forms) so child
# processes — git, curl, the installer — inherit them.
setup_proxy_env() {
    log_info "设置代理环境..."
    local v
    for v in http_proxy https_proxy HTTP_PROXY HTTPS_PROXY; do
        export "$v=$PROXY_URL"
    done
    export no_proxy="localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net"
    export NO_PROXY="localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net"
    log_success "代理环境已设置"
}
# Clone the config repo on first run, or pull the existing checkout.
# Uses the global GITEA_URL / MGMT_DIR; leaves the shell cd'd into MGMT_DIR.
clone_repository() {
log_info "获取配置仓库..."
if [[ -d "$MGMT_DIR" ]]; then
log_info "更新现有仓库..."
cd "$MGMT_DIR"
git pull origin main
else
log_info "克隆仓库..."
git clone "$GITEA_URL" "$MGMT_DIR"
cd "$MGMT_DIR"
fi
}
# Write the persistent proxy environment file that ~/.zshrc sources on login.
# The heredoc delimiter is unquoted, so ${PROXY_HOST}/${PROXY_URL} expand at
# write time and the file contains the resolved values.
create_proxy_config() {
log_info "创建代理配置文件..."
cat > "$MGMT_DIR/configuration/proxy.env" << EOF
# Proxy Configuration for ${PROXY_HOST}:${PROXY_PORT}
export http_proxy=${PROXY_URL}
export https_proxy=${PROXY_URL}
export HTTP_PROXY=${PROXY_URL}
export HTTPS_PROXY=${PROXY_URL}
export no_proxy=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
export NO_PROXY=localhost,127.0.0.1,::1,.local,.tailnet-68f9.ts.net
export ALL_PROXY=${PROXY_URL}
export all_proxy=${PROXY_URL}
export GIT_HTTP_PROXY=${PROXY_URL}
export GIT_HTTPS_PROXY=${PROXY_URL}
export CURL_PROXY=${PROXY_URL}
export WGET_PROXY=${PROXY_URL}
EOF
log_success "代理配置文件已创建"
}
# After installation, ask whether to keep the proxy enabled.
# No-op unless create_proxy_config wrote proxy.env earlier in this run.
ask_proxy_keep() {
if [[ -f "$MGMT_DIR/configuration/proxy.env" ]]; then
echo ""
log_info "安装完成!代理已临时开启用于安装。"
echo ""
log_info "是否保持代理开启?"
echo " Y - 保持代理开启(推荐,方便访问外网)"
echo " N - 关闭代理(如果不需要访问外网)"
echo ""
while true; do
read -p "请选择 (Y/n): " choice
case $choice in
[Yy]|"")
log_success "代理保持开启"
log_info "使用 'proxy-status' 查看代理状态"
log_info "使用 'proxy-toggle' 切换代理状态"
break
;;
[Nn])
log_info "关闭代理..."
# Prefer the managed toggle script; fall back to deleting the env file.
if [[ -f "$MGMT_DIR/scripts/utilities/proxy-toggle.sh" ]]; then
"$MGMT_DIR/scripts/utilities/proxy-toggle.sh" disable
else
rm -f "$MGMT_DIR/configuration/proxy.env"
log_success "代理已关闭"
fi
break
;;
*)
log_warning "无效选择,请输入 Y 或 N"
;;
esac
done
fi
}
# Main flow: choose proxy vs direct, verify connectivity, clone the repo,
# run the installer, then offer to keep or disable the proxy.
main() {
log_info "开始智能安装 ZSH 配置..."
# Ask the user whether to use the proxy.
if ask_proxy_usage; then
# Proxy chosen: verify it works before exporting/persisting it.
if test_proxy_connection; then
setup_proxy_env
create_proxy_config
log_success "代理环境已设置,开始安装..."
else
log_error "代理连接失败,无法继续安装"
exit 1
fi
else
# Direct connection chosen: verify reachability first.
if test_direct_connection; then
log_success "直连正常,开始安装..."
else
log_error "直连失败,无法继续安装"
exit 1
fi
fi
# Fetch or refresh the repository.
clone_repository
# Run the real installer from the checkout.
log_info "运行 ZSH 配置安装脚本..."
chmod +x "$MGMT_DIR/configuration/zsh/install-zsh-config.sh"
"$MGMT_DIR/configuration/zsh/install-zsh-config.sh"
log_success "智能安装完成!"
# Offer to keep or disable the proxy that was enabled for the install.
ask_proxy_keep
}
main "$@"

View File

@@ -1,151 +0,0 @@
#!/bin/bash
# Verify that the ZSH plugins and configuration were installed correctly.
set -euo pipefail
# ANSI colors for the log helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Colored log helpers: tag + message to stdout.
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Check that a plugin's install directory exists and log the result.
# $1 = plugin name (for messages), $2 = expected install path.
# Returns 0 if installed, 1 if missing.
test_plugin() {
    local plugin_name="$1"
    local plugin_path="$2"
    if [[ ! -d "$plugin_path" ]]; then
        log_error "$plugin_name 未安装: $plugin_path"
        return 1
    fi
    log_success "$plugin_name 已安装"
    return 0
}
# Check whether a shell alias is defined in the current shell; log the result.
# Returns 0 when the alias exists, 1 otherwise.
test_alias() {
    local alias_name="$1"
    if ! alias "$alias_name" &>/dev/null; then
        log_warning "✗ 别名 $alias_name 未加载"
        return 1
    fi
    log_success "✓ 别名 $alias_name 已加载"
    return 0
}
# Run the full plugin/configuration check suite and print a summary.
# Failures are counted in 'failed' so all checks run before reporting.
main() {
    log_info "测试 ZSH 插件和配置..."
    echo ""
    local failed=0
    # FIX: the original incremented with '((failed++))'. When failed=0 the
    # arithmetic command evaluates to 0 and returns exit status 1, which
    # aborts the whole script under 'set -e' at the first failure. The
    # assignment form 'failed=$((failed+1))' always succeeds.
    # oh-my-zsh itself
    log_info "检查 oh-my-zsh 安装..."
    if [[ -d "$HOME/.oh-my-zsh" ]]; then
        log_success "✓ oh-my-zsh 已安装"
    else
        log_error "✗ oh-my-zsh 未安装"
        failed=$((failed+1))
    fi
    echo ""
    # Custom (third-party) plugins under $ZSH_CUSTOM/plugins
    log_info "检查自定义插件..."
    test_plugin "zsh-autosuggestions" "$HOME/.oh-my-zsh/custom/plugins/zsh-autosuggestions" || failed=$((failed+1))
    test_plugin "zsh-syntax-highlighting" "$HOME/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting" || failed=$((failed+1))
    test_plugin "zsh-completions" "$HOME/.oh-my-zsh/custom/plugins/zsh-completions" || failed=$((failed+1))
    echo ""
    # Built-in plugins shipped with oh-my-zsh
    log_info "检查内置插件..."
    test_plugin "git" "$HOME/.oh-my-zsh/plugins/git" || failed=$((failed+1))
    test_plugin "docker" "$HOME/.oh-my-zsh/plugins/docker" || failed=$((failed+1))
    test_plugin "ansible" "$HOME/.oh-my-zsh/plugins/ansible" || failed=$((failed+1))
    test_plugin "terraform" "$HOME/.oh-my-zsh/plugins/terraform" || failed=$((failed+1))
    test_plugin "kubectl" "$HOME/.oh-my-zsh/plugins/kubectl" || failed=$((failed+1))
    echo ""
    # Custom alias file (informational — does not increment 'failed')
    log_info "检查自定义别名..."
    if [[ -f "$HOME/.oh-my-zsh/custom/aliases.zsh" ]]; then
        log_success "✓ 自定义别名文件已安装"
    else
        log_warning "✗ 自定义别名文件未安装"
    fi
    echo ""
    # Key aliases the config is expected to define.
    # NOTE(review): aliases from ~/.zshrc are not loaded into a
    # non-interactive bash script — confirm these checks can ever pass here.
    log_info "检查关键别名..."
    test_alias "mgmt" || failed=$((failed+1))
    test_alias "dps" || failed=$((failed+1))
    test_alias "k" || failed=$((failed+1))
    test_alias "gs" || failed=$((failed+1))
    echo ""
    # ~/.zshrc content checks
    log_info "检查 .zshrc 配置..."
    if [[ -f "$HOME/.zshrc" ]]; then
        log_success "✓ .zshrc 文件存在"
        if grep -q "zsh-autosuggestions" "$HOME/.zshrc"; then
            log_success "✓ zsh-autosuggestions 已配置"
        else
            log_warning "✗ zsh-autosuggestions 未配置"
        fi
        if grep -q "zsh-syntax-highlighting" "$HOME/.zshrc"; then
            log_success "✓ zsh-syntax-highlighting 已配置"
        else
            log_warning "✗ zsh-syntax-highlighting 未配置"
        fi
        if grep -q "agnoster" "$HOME/.zshrc"; then
            log_success "✓ agnoster 主题已配置"
        else
            log_warning "✗ agnoster 主题未配置"
        fi
    else
        log_error "✗ .zshrc 文件不存在"
        failed=$((failed+1))
    fi
    echo ""
    # Summary
    if [[ $failed -eq 0 ]]; then
        log_success "🎉 所有测试通过ZSH 配置完整。"
        echo ""
        log_info "使用方法:"
        echo " - 重新登录或运行: source ~/.zshrc"
        echo " - 测试自动建议: 输入 'docker' 然后按 → 键"
        echo " - 测试别名: 运行 'mgmt-status' 或 'dps'"
    else
        log_error "❌ 发现 $failed 个问题,请检查安装。"
        echo ""
        log_info "修复建议:"
        echo " 1. 重新运行安装脚本"
        echo " 2. 检查网络连接"
        echo " 3. 手动安装缺失的插件"
    fi
}
main "$@"

View File

@@ -1,260 +0,0 @@
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:$HOME/.local/bin:/usr/local/bin:$PATH
# Path to the Oh My Zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Prompt theme — see https://github.com/ohmyzsh/ohmyzsh/wiki/Themes.
# NOTE: agnoster needs a Powerline-patched font to render correctly.
ZSH_THEME="agnoster"
# Auto-update behavior: remind when it's time, never update silently.
# (Other modes: 'disabled' / 'auto'; frequency via ':omz:update' frequency N.)
zstyle ':omz:update' mode reminder
# Plugins to load. Standard plugins live in $ZSH/plugins/, custom plugins in
# $ZSH_CUSTOM/plugins/. Keep the list lean — many plugins slow shell startup.
# FIX: zsh-syntax-highlighting moved to the end — its documentation requires
# it to be sourced after all other widget-defining plugins — and
# history-substring-search follows it, as its README specifies when both
# are used together.
plugins=(
git
docker
docker-compose
ansible
terraform
kubectl
helm
aws
gcloud
zsh-autosuggestions
zsh-completions
colored-man-pages
command-not-found
extract
sudo
systemd
tmux
vscode
web-search
z
zsh-syntax-highlighting
history-substring-search
)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions.
# FIX: the original if/else on $SSH_CONNECTION set the identical value
# ('vim') on both branches — collapsed to a single export. Reintroduce the
# branch if the local and remote editors should ever differ.
export EDITOR='vim'
# Compilation flags
# export ARCHFLAGS="-arch $(uname -m)"
# =============================================================================
# CUSTOM CONFIGURATION FOR MANAGEMENT SYSTEM
# =============================================================================
# Load proxy configuration if exists
if [[ -f /root/mgmt/configuration/proxy.env ]]; then
source /root/mgmt/configuration/proxy.env
fi
# Project management aliases
alias mgmt='cd /root/mgmt'
alias mgmt-status='cd /root/mgmt && ./mgmt.sh status'
alias mgmt-deploy='cd /root/mgmt && ./mgmt.sh deploy'
alias mgmt-cleanup='cd /root/mgmt && ./mgmt.sh cleanup'
# Ansible aliases
alias ansible-check='cd /root/mgmt/configuration && ansible-playbook --syntax-check'
alias ansible-deploy='cd /root/mgmt/configuration && ansible-playbook -i inventories/production/inventory.ini'
alias ansible-ping='cd /root/mgmt/configuration && ansible -i inventories/production/inventory.ini all -m ping'
# OpenTofu/Terraform aliases (pinned to the dev environment)
alias tofu-init='cd /root/mgmt/tofu/environments/dev && tofu init'
alias tofu-plan='cd /root/mgmt/tofu/environments/dev && tofu plan -var-file="terraform.tfvars"'
alias tofu-apply='cd /root/mgmt/tofu/environments/dev && tofu apply -var-file="terraform.tfvars"'
alias tofu-destroy='cd /root/mgmt/tofu/environments/dev && tofu destroy -var-file="terraform.tfvars"'
alias tofu-output='cd /root/mgmt/tofu/environments/dev && tofu output'
# Docker aliases
alias d='docker'
alias dc='docker-compose'
alias dps='docker ps'
alias dpsa='docker ps -a'
alias di='docker images'
alias dex='docker exec -it'
alias dlog='docker logs -f'
alias dstop='docker stop'
alias dstart='docker start'
alias drm='docker rm'
alias drmi='docker rmi'
alias dclean='docker system prune -f'
# Docker Swarm aliases
alias dswarm='docker swarm'
alias dstack='docker stack'
alias dservice='docker service'
alias dnode='docker node'
alias dnetwork='docker network'
alias dsecret='docker secret'
alias dconfig='docker config'
# Kubernetes aliases
alias k='kubectl'
alias kgp='kubectl get pods'
alias kgs='kubectl get services'
alias kgd='kubectl get deployments'
alias kgn='kubectl get nodes'
alias kdp='kubectl describe pod'
alias kds='kubectl describe service'
alias kdd='kubectl describe deployment'
alias kaf='kubectl apply -f'
alias kdf='kubectl delete -f'
alias kl='kubectl logs -f'
# Git aliases
alias gs='git status'
alias ga='git add'
alias gc='git commit'
alias gp='git push'
alias gl='git pull'
alias gd='git diff'
alias gb='git branch'
alias gco='git checkout'
alias gcom='git checkout main'
alias gcod='git checkout develop'
alias gst='git stash'
alias gstp='git stash pop'
# System aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Network aliases
# NOTE(review): 'myip' requires jq; 'speedtest' pipes remote code into
# python3 — confirm both tools/implications are acceptable on every host.
alias ports='netstat -tuln'
alias myip='curl -s https://httpbin.org/ip | jq -r .origin'
alias speedtest='curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python3'
# Process aliases
# NOTE(review): 'top' is redirected to htop — requires htop installed.
alias psg='ps aux | grep'
alias top='htop'
# File operations (interactive: prompt before overwrite/delete)
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
alias mkdir='mkdir -pv'
# History configuration: 10k entries kept in memory and on disk, shared and
# appended incrementally across sessions, with duplicate/blank suppression.
HISTSIZE=10000
SAVEHIST=10000
HISTFILE=~/.zsh_history
setopt HIST_VERIFY
setopt SHARE_HISTORY
setopt APPEND_HISTORY
setopt INC_APPEND_HISTORY
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_REDUCE_BLANKS
setopt HIST_IGNORE_SPACE
# Auto-completion configuration: case-insensitive matching, colored listings
# (reusing LS_COLORS), and arrow-key menu selection.
autoload -U compinit && compinit
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}'
zstyle ':completion:*' list-colors "${(s.:.)LS_COLORS}"
zstyle ':completion:*' menu select
# Key bindings: Up/Down drive history-substring-search; Ctrl+Left/Right move
# by word. NOTE(review): these escape sequences are terminal-dependent —
# confirm they match the terminals in use.
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
bindkey '^[[1;5C' forward-word
bindkey '^[[1;5D' backward-word
# Auto-suggestions configuration (dim gray suggestions from history, then completion)
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=8'
ZSH_AUTOSUGGEST_STRATEGY=(history completion)
# Syntax highlighting configuration
ZSH_HIGHLIGHT_HIGHLIGHTERS=(main brackets pattern cursor)
# Welcome message
echo "🚀 Management System Shell Ready!"
echo "📁 Project: /root/mgmt"
echo "🔧 Available commands: mgmt-status, mgmt-deploy, mgmt-cleanup"
echo "🐳 Docker: d, dc, dps, dex, dlog"
echo "☸️ Kubernetes: k, kgp, kgs, kaf, kdf"
echo "🏗️ OpenTofu: tofu-init, tofu-plan, tofu-apply"
echo "⚙️ Ansible: ansible-check, ansible-deploy, ansible-ping"
echo ""