Refactor the project file directory structure and update related path references

- Create the new directory structure: research/, tools/ (with subdirectories), and apps/
- Move core theory files to research/core-theory/
- Move Tianshan theory files to research/specialized/
- Reorganize tools/ into multiple subdirectories: content-generation/, data-processing/, etc.
- Update path references in all documentation, including README.md, 项目结构说明.md, etc.
- Update path references in workflow files and scripts
- Update path references in the documentation index files
Author: ben
Date: 2025-10-27 12:54:26 +00:00
Parent: a60b82182d
Commit: 5b0a6c7bc1
22 changed files with 243 additions and 70 deletions


@@ -0,0 +1,119 @@
#!/usr/bin/env python3
"""
Script for testing the Milvus GPU configuration.
"""
import sys
import os


def test_gpu_availability():
    """Test GPU availability."""
    print("=== GPU Availability Test ===")

    # Test PyTorch CUDA support
    try:
        import torch
        print(f"PyTorch version: {torch.__version__}")
        print(f"CUDA available: {torch.cuda.is_available()}")

        if torch.cuda.is_available():
            print(f"Number of CUDA devices: {torch.cuda.device_count()}")
            print(f"Device name: {torch.cuda.get_device_name(0)}")
            print(f"CUDA version: {torch.version.cuda}")
            return True
        else:
            print("CUDA is not available")
            return False
    except Exception as e:
        print(f"PyTorch test failed: {e}")
        return False


def test_milvus_connection():
    """Test the Milvus connection."""
    print("\n=== Milvus Connection Test ===")

    try:
        from pymilvus import connections, utility
        print("PyMilvus imported successfully")

        # Try to connect to a local Milvus instance
        try:
            connections.connect("default", host="localhost", port="19530")
            print("Connected to Milvus successfully")

            # Check the Milvus server version
            version = utility.get_server_version()
            print(f"Milvus version: {version}")
            return True
        except Exception as e:
            print(f"Milvus connection failed: {e}")
            print("Note: the Milvus service must be started first")
            return False
    except Exception as e:
        print(f"PyMilvus import failed: {e}")
        return False


def test_gpu_memory():
    """Test GPU memory and a simple GPU computation."""
    print("\n=== GPU Memory Test ===")

    try:
        import torch
        if torch.cuda.is_available():
            # Query GPU memory information
            device = torch.cuda.current_device()
            total_memory = torch.cuda.get_device_properties(device).total_memory
            allocated_memory = torch.cuda.memory_allocated(device)
            cached_memory = torch.cuda.memory_reserved(device)

            print(f"Total GPU memory: {total_memory / 1024**3:.2f} GB")
            print(f"Allocated memory: {allocated_memory / 1024**3:.2f} GB")
            print(f"Reserved (cached) memory: {cached_memory / 1024**3:.2f} GB")

            # Run a simple GPU computation
            print("\n=== GPU Computation Test ===")
            x = torch.randn(1000, 1000, device='cuda')
            y = torch.randn(1000, 1000, device='cuda')
            z = torch.mm(x, y)
            print("GPU matrix multiplication test succeeded!")
            print(f"Result tensor shape: {z.shape}")
            print(f"Result tensor device: {z.device}")
            return True
        else:
            print("CUDA is not available; cannot test GPU memory")
            return False
    except Exception as e:
        print(f"GPU memory test failed: {e}")
        return False


def main():
    print("Milvus GPU configuration test")
    print("=" * 50)

    # Test GPU availability
    gpu_available = test_gpu_availability()

    # Test the Milvus connection
    milvus_available = test_milvus_connection()

    # Test GPU memory and computation
    gpu_memory_ok = False
    if gpu_available:
        gpu_memory_ok = test_gpu_memory()

    # Summary
    print("\n" + "=" * 50)
    print("Test summary:")
    print(f"GPU available: {'✅' if gpu_available else '❌'}")
    print(f"Milvus connection: {'✅' if milvus_available else '❌'}")
    print(f"GPU computation: {'✅' if gpu_memory_ok else '❌'}")

    if gpu_available and gpu_memory_ok:
        print("\n🎉 The GPU configuration is working and can be used for GPU-accelerated applications such as Milvus!")
    else:
        print("\n⚠️ There are problems with the GPU configuration, which may affect GPU acceleration for applications such as Milvus")


if __name__ == "__main__":
    main()
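
The script above only checks that CUDA works and that a local Milvus instance is reachable. As a follow-up, the sketch below shows how a GPU-backed index could be created once those checks pass. It is a minimal sketch under assumptions not stated in this commit: a GPU-enabled Milvus build is listening on localhost:19530, and the collection name (gpu_test_collection), vector dimension (128), and GPU_IVF_FLAT index parameters are illustrative placeholders rather than values from this repository.

```python
# Minimal sketch: create a small collection and build a GPU index on it.
# Assumes a GPU-enabled Milvus build is running on localhost:19530; the
# collection name, dimension, and index parameters are illustrative only.
from pymilvus import (
    connections, utility,
    Collection, CollectionSchema, FieldSchema, DataType,
)

connections.connect("default", host="localhost", port="19530")

# A tiny schema: an auto-generated primary key plus one vector field.
fields = [
    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
    FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128),
]
schema = CollectionSchema(fields, description="GPU index smoke test")

# Drop any leftover collection from a previous run, then recreate it.
if utility.has_collection("gpu_test_collection"):
    utility.drop_collection("gpu_test_collection")
collection = Collection("gpu_test_collection", schema)

# Build a GPU index; GPU index types such as GPU_IVF_FLAT are only
# available in GPU-enabled Milvus builds.
collection.create_index(
    field_name="embedding",
    index_params={
        "index_type": "GPU_IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128},
    },
)
collection.load()
print("GPU index created and collection loaded")
```

If the GPU index type is rejected, the deployment is most likely a CPU-only Milvus build; substituting the standard IVF_FLAT index type gives a functional, though non-accelerated, check.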