feat: Complete project reorganization

- Reorganize core-theory into 6 thematic subdirectories (01-06)
- Clean up root directory, move files to appropriate locations
- Create comprehensive README files for navigation
- Add new academic papers and research content
- Implement English naming convention for better indexing
- Total: 42 core theory files organized, 7 READMEs created

Key improvements:
- Clear hierarchical structure (foundational → matrix → analysis → empires)
- Better discoverability with detailed navigation guides
- Scalable framework for future content expansion
- AI-friendly organization for indexing and processing
This commit is contained in:
ben
2025-10-21 12:56:11 +00:00
parent b6105b6770
commit 1595d7f68e
105 changed files with 19448 additions and 197 deletions

119
tools/test_milvus_gpu.py Normal file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python3
"""
测试Milvus GPU配置的脚本
"""
import sys
import os
def test_gpu_availability():
    """Probe PyTorch's CUDA support; return True iff a CUDA device is usable.

    Any failure (torch missing, CUDA runtime error) is reported and
    mapped to False rather than raised.
    """
    print("=== GPU可用性测试 ===")
    try:
        import torch

        print(f"PyTorch版本: {torch.__version__}")
        print(f"CUDA可用: {torch.cuda.is_available()}")
        # Guard clause: bail out early on CPU-only installs.
        if not torch.cuda.is_available():
            print("CUDA不可用")
            return False
        print(f"CUDA设备数量: {torch.cuda.device_count()}")
        print(f"设备名称: {torch.cuda.get_device_name(0)}")
        print(f"CUDA版本: {torch.version.cuda}")
        return True
    except Exception as err:
        print(f"PyTorch测试失败: {err}")
        return False
def test_milvus_connection():
    """Try to connect to a local Milvus server; return True on success.

    Two failure modes are reported separately: pymilvus not importable,
    and the server at localhost:19530 not reachable. Both return False.
    """
    print("\n=== Milvus连接测试 ===")
    try:
        from pymilvus import connections, utility
    except Exception as import_err:
        print(f"PyMilvus导入失败: {import_err}")
        return False
    print("PyMilvus导入成功")
    # Connecting needs a running Milvus service; treat failure as a soft error.
    try:
        connections.connect("default", host="localhost", port="19530")
        print("Milvus连接成功")
        server_version = utility.get_server_version()
        print(f"Milvus版本: {server_version}")
        return True
    except Exception as conn_err:
        print(f"Milvus连接失败: {conn_err}")
        print("注意: 这需要先启动Milvus服务")
        return False
def test_gpu_memory():
    """Report GPU memory usage and run a small on-GPU matmul; True on success.

    Returns False when CUDA is unavailable or any torch call fails.
    """
    print("\n=== GPU内存测试 ===")
    try:
        import torch

        # Guard clause: nothing to measure without a CUDA device.
        if not torch.cuda.is_available():
            print("CUDA不可用无法测试GPU内存")
            return False
        dev = torch.cuda.current_device()
        props = torch.cuda.get_device_properties(dev)
        allocated = torch.cuda.memory_allocated(dev)
        reserved = torch.cuda.memory_reserved(dev)
        gib = 1024 ** 3
        print(f"GPU总内存: {props.total_memory / gib:.2f} GB")
        print(f"已分配内存: {allocated / gib:.2f} GB")
        print(f"缓存内存: {reserved / gib:.2f} GB")
        # Smoke-test actual GPU compute with a 1000x1000 matrix product.
        print("\n=== GPU计算测试 ===")
        lhs = torch.randn(1000, 1000, device='cuda')
        rhs = torch.randn(1000, 1000, device='cuda')
        product = lhs.mm(rhs)
        print("GPU矩阵乘法测试成功!")
        print(f"结果张量形状: {product.shape}")
        print(f"结果张量设备: {product.device}")
        return True
    except Exception as err:
        print(f"GPU内存测试失败: {err}")
        return False
def main():
    """Run all GPU/Milvus checks and print a pass/fail summary.

    Runs the availability, connection, and memory/compute checks in order,
    skipping the memory/compute check when no GPU was detected.
    """
    print("Milvus GPU配置测试")
    print("=" * 50)
    # GPU availability check.
    gpu_available = test_gpu_availability()
    # Milvus connectivity check (independent of the GPU result).
    milvus_available = test_milvus_connection()
    # GPU memory/compute check only makes sense when a GPU was found.
    gpu_memory_ok = False
    if gpu_available:
        gpu_memory_ok = test_gpu_memory()
    # Summary. BUG FIX: the original printed '' for both the pass and fail
    # branches (the ✓/✗ markers were evidently stripped by an encoding
    # issue), so the summary lines carried no information. Restore
    # visible markers.
    print("\n" + "=" * 50)
    print("测试总结:")
    print(f"GPU可用: {'✓' if gpu_available else '✗'}")
    print(f"Milvus连接: {'✓' if milvus_available else '✗'}")
    print(f"GPU计算: {'✓' if gpu_memory_ok else '✗'}")
    if gpu_available and gpu_memory_ok:
        print("\n🎉 GPU配置正常可以用于Milvus等GPU加速应用!")
    else:
        print("\n⚠️ GPU配置存在问题可能影响Milvus等应用的GPU加速功能")


if __name__ == "__main__":
    main()