🏗️ 项目重构:模块化清理完成

This commit is contained in:
llama-research
2025-09-01 12:29:27 +00:00
parent ef7657101a
commit f9856c31e5
349 changed files with 41438 additions and 254 deletions

View File

@@ -0,0 +1 @@
# 配置管理模块

View File

@@ -0,0 +1,244 @@
#!/usr/bin/env python3
"""
Doppler配置管理模块
安全地从Doppler获取配置和密钥
"""
import os
from typing import Optional, Dict, Any
# Load .env early if present: prefer find_dotenv(), then fall back to the
# project root (one directory above this file).
try:
    from dotenv import load_dotenv, find_dotenv  # type: ignore
    _env_path = find_dotenv()
    if _env_path:
        load_dotenv(_env_path)
    else:
        # Try loading .env from the project root directory instead
        from pathlib import Path
        root_env = Path(__file__).resolve().parents[1] / '.env'
        if root_env.exists():
            load_dotenv(root_env)
except Exception:
    # Skip silently if python-dotenv is not installed or loading fails
    pass
def get_secret(key: str, default: Optional[str] = None) -> Optional[str]:
    """Fetch a secret from the environment (Doppler-injected or .env-loaded).

    Args:
        key: Name of the secret / environment variable.
        default: Value to return when the variable is unset or empty.

    Returns:
        The environment value, or ``default`` when the variable is missing.

    Raises:
        ValueError: If the secret is missing and no default was provided.
    """
    # BUGFIX: the old hard-coded ``temp_secrets`` placeholder values
    # ("your_rapidapi_key_here", ...) silently satisfied "required" lookups,
    # so a misconfigured environment passed validation with fake keys.
    # Doppler injects real secrets as environment variables; an empty string
    # counts as "not configured".
    value = os.getenv(key)
    if not value:
        value = default
    if not value and default is None:
        raise ValueError(
            f"Required secret '{key}' not found in environment variables"
        )
    return value
def get_rapidapi_key() -> str:
    """Return the RapidAPI key.

    Raises:
        ValueError: When the key is not configured.
    """
    return get_secret('RAPIDAPI_KEY')
def get_openrouter_key() -> str:
    """Return the OpenRouter API key.

    Raises:
        ValueError: When the key is not configured.
    """
    return get_secret('OPENROUTER_API_KEY_1')
def get_google_api_key() -> str:
    """Return the Google API key (used for Gemini / ADK).

    Raises:
        ValueError: When the key is not configured.
    """
    return get_secret('GOOGLE_API_KEY')
def get_google_genai_config() -> Dict[str, str]:
    """Assemble the full Google GenAI configuration.

    Returns:
        Mapping of Google GenAI settings; 'api_key' is blank when
        Vertex AI mode is enabled.
    """
    vertex_enabled = get_secret('GOOGLE_GENAI_USE_VERTEXAI', 'FALSE').upper() == 'TRUE'
    config = {
        'api_key': '' if vertex_enabled else get_secret('GOOGLE_API_KEY', ''),
        'use_vertex_ai': str(vertex_enabled).upper(),
        'project_id': get_secret('GOOGLE_CLOUD_PROJECT_ID', ''),
        'location': get_secret('GOOGLE_CLOUD_LOCATION', 'us-central1'),
        'memory_bank_enabled': get_secret('VERTEX_MEMORY_BANK_ENABLED', 'TRUE'),
        'service_account_key': get_secret('GOOGLE_SERVICE_ACCOUNT_KEY', ''),
    }
    return config
def get_cloudflare_config() -> Dict[str, str]:
    """Return the Cloudflare configuration.

    Sensitive values are resolved through Doppler / environment variables;
    non-sensitive defaults are kept inline.
    """
    config = {}
    # Secrets come from the environment.
    config['account_id'] = get_secret('CLOUDFLARE_ACCOUNT_ID', '')
    config['api_token'] = get_secret('CLOUDFLARE_API_TOKEN', '')
    # Non-sensitive settings, written in plain text in the code.
    config['vectorize_index'] = 'autorag-shy-cherry-f1fb'
    config['embed_model'] = '@cf/baai/bge-m3'
    config['autorag_domain'] = 'autorag.seekkey.tech'
    return config
def get_database_config() -> Dict[str, str]:
    """Return database connection settings (empty string when unset)."""
    env_names = {
        'postgres_url': 'POSTGRES_URL',
        'mongodb_url': 'MONGODB_URL',
        'zilliz_url': 'ZILLIZ_URL',
        'zilliz_token': 'ZILLIZ_TOKEN',
    }
    return {name: get_secret(env_key, '') for name, env_key in env_names.items()}
def validate_config(mode: str = "hybrid") -> bool:
    """Check that the configuration required for *mode* is present.

    Args:
        mode: Validation mode ("openrouter", "google_adk"; anything else
            falls through to the hybrid mode).

    Returns:
        True when the configuration for the requested mode is complete.
    """
    print(f"🔧 当前模式: {mode}")
    required_keys = []
    # Mode-specific configuration
    if mode == "openrouter":
        required_keys.extend(['RAPIDAPI_KEY', 'OPENROUTER_API_KEY_1'])
        # Validate the OpenRouter configuration
        openrouter_key = get_secret('OPENROUTER_API_KEY_1', '')
        if not openrouter_key:
            print("❌ OpenRouter API Key 未配置")
            return False
        print("✅ OpenRouter 配置验证通过")
    elif mode == "google_adk":
        genai_config = get_google_genai_config()
        # 'use_vertex_ai' is stored as the string 'TRUE'/'FALSE'.
        use_vertex = genai_config.get('use_vertex_ai', 'FALSE').upper() == 'TRUE'
        if not use_vertex:
            required_keys.extend(['GOOGLE_API_KEY'])
            # Validate the Google ADK configuration
            google_key = get_secret('GOOGLE_API_KEY', '')
            if not google_key:
                print("❌ Google API Key 未配置")
                print("请访问 https://aistudio.google.com/ 获取 API 密钥")
                print("然后运行: doppler secrets set GOOGLE_API_KEY=your_key")
                return False
            print(f"✅ Google ADK 配置验证通过 (密钥长度: {len(google_key)} 字符)")
        else:
            print("✅ Google ADK (Vertex AI) 配置验证通过")
        # Show the Google GenAI configuration
        print(f"📱 Google GenAI 配置:")
        if not use_vertex:
            print(f" - API Key: 已配置")
        print(f" - Use Vertex AI: {genai_config.get('use_vertex_ai', False)}")
        if genai_config.get('project_id'):
            print(f" - Project ID: {genai_config['project_id']}")
        if genai_config.get('location'):
            print(f" - Location: {genai_config['location']}")
    else:  # hybrid mode
        required_keys.extend(['RAPIDAPI_KEY'])
        # Require at least one AI API key to be configured
        ai_keys = ['OPENROUTER_API_KEY_1', 'GOOGLE_API_KEY']
        if not any(os.getenv(key) for key in ai_keys):
            print("❌ 需要至少配置一个AI API密钥:")
            print(" - OPENROUTER_API_KEY_1 (OpenRouter模式)")
            print(" - GOOGLE_API_KEY (Google ADK模式)")
            return False
        # Validate the hybrid-mode configuration
        openrouter_key = get_secret('OPENROUTER_API_KEY_1', '')
        google_key = get_secret('GOOGLE_API_KEY', '')
        available_services = []
        if openrouter_key:
            available_services.append("OpenRouter")
        if google_key:
            available_services.append("Google ADK")
        print(f"✅ 混合模式配置验证通过,可用服务: {', '.join(available_services)}")
    # Any key listed as required for the chosen mode must be present.
    missing_keys = []
    for key in required_keys:
        if not os.getenv(key):
            missing_keys.append(key)
    if missing_keys:
        print(f"❌ 缺少必要的配置: {', '.join(missing_keys)}")
        print("请确保已正确配置Doppler或环境变量")
        return False
    # Report configuration status
    print("✅ 配置验证通过")
    print(f"📋 当前模式: {mode}")
    # Show which AI services are available
    ai_services = []
    if os.getenv('OPENROUTER_API_KEY_1'):
        ai_services.append("OpenRouter")
    if os.getenv('GOOGLE_API_KEY'):
        ai_services.append("Google ADK")
    if ai_services:
        print(f"🤖 可用AI服务: {', '.join(ai_services)}")
    return True
if __name__ == "__main__":
    # Standalone configuration validation entry point (default hybrid mode).
    print("🔧 验证配置...")
    validate_config()

View File

@@ -0,0 +1,141 @@
#!/usr/bin/env python3
"""
项目配置管理模块
从环境变量 (.env) 文件中安全地加载配置和密钥。
"""
import os
from typing import Optional, Dict, Any
from pathlib import Path
# Ensure the project-root .env file is loaded (this file lives in config/,
# so the root is one directory up).
try:
    from dotenv import load_dotenv
    # Build the path to the project root (assumes this file is under config/)
    env_path = Path(__file__).resolve().parents[1] / '.env'
    if env_path.exists():
        load_dotenv(dotenv_path=env_path)
    else:
        print("Warning: .env file not found at project root. Relying on system environment variables.")
except ImportError:
    print("Warning: python-dotenv not installed. Relying on system environment variables.")
    pass
def get_secret(key: str, default: Optional[str] = None) -> str:
    """Look up *key* in the process environment.

    Args:
        key: Name of the environment variable.
        default: Value returned when the variable is not set.

    Returns:
        The variable's value, or *default* when it is unset.

    Raises:
        ValueError: When the variable is unset and no default was given.
    """
    value = os.getenv(key)
    if value is None:
        if default is None:
            raise ValueError(f"Required secret '{key}' not found in environment variables. "
                             "Please ensure it is set in your .env file or system environment.")
        return default
    return value
def get_rapidapi_key() -> str:
    """Return the RapidAPI key; raises ValueError when unset."""
    return get_secret('RAPIDAPI_KEY')
def get_openrouter_key() -> str:
    """Return the OpenRouter API key; raises ValueError when unset."""
    return get_secret('OPENROUTER_API_KEY_1')
def get_google_api_key() -> str:
    """Return the Google API key; raises ValueError when unset."""
    return get_secret('GOOGLE_API_KEY')
def get_google_genai_config() -> Dict[str, Any]:
    """Collect the complete Google GenAI configuration into a dict."""
    vertex_mode = get_secret('GOOGLE_GENAI_USE_VERTEXAI', 'FALSE').upper() == 'TRUE'
    api_key = '' if vertex_mode else get_secret('GOOGLE_API_KEY', '')
    return {
        'api_key': api_key,
        'use_vertex_ai': vertex_mode,
        'project_id': get_secret('GOOGLE_CLOUD_PROJECT_ID', ''),
        'location': get_secret('GOOGLE_CLOUD_LOCATION', 'us-central1'),
        'memory_bank_enabled': get_secret('VERTEX_MEMORY_BANK_ENABLED', 'TRUE').upper() == 'TRUE',
        'service_account_key': get_secret('GOOGLE_SERVICE_ACCOUNT_KEY', ''),
    }
def get_cloudflare_config() -> Dict[str, str]:
    """Return the Cloudflare configuration (secrets from the environment)."""
    config = {}
    config['account_id'] = get_secret('CLOUDFLARE_ACCOUNT_ID', '')
    config['api_token'] = get_secret('CLOUDFLARE_API_TOKEN', '')
    # Non-sensitive defaults kept inline.
    config['vectorize_index'] = 'autorag-shy-cherry-f1fb'
    config['embed_model'] = '@cf/baai/bge-m3'
    config['autorag_domain'] = 'autorag.seekkey.tech'
    return config
def get_database_config() -> Dict[str, str]:
    """Return database connection settings (empty string when unset)."""
    env_names = {
        'postgres_url': 'POSTGRES_URL',
        'mongodb_url': 'MONGODB_URL',
        'zilliz_url': 'ZILLIZ_URL',
        'zilliz_token': 'ZILLIZ_TOKEN',
    }
    return {name: get_secret(env_key, '') for name, env_key in env_names.items()}
def validate_config(mode: str = "hybrid") -> bool:
    """Validate that the configuration needed for *mode* is present.

    Args:
        mode: One of "openrouter", "google_adk", or "hybrid".

    Returns:
        True when the configuration is valid, False otherwise — including
        for an unrecognized mode, which previously fell through and
        returned True without validating anything.
    """
    print(f"🔧 Validating configuration for mode: {mode}")
    try:
        if mode == "openrouter":
            get_openrouter_key()
            print("✅ OpenRouter configuration is valid.")
        elif mode == "google_adk":
            config = get_google_genai_config()
            if not config['use_vertex_ai']:
                get_google_api_key()
            else:
                if not config['project_id']:
                    raise ValueError("GOOGLE_CLOUD_PROJECT_ID is required for Vertex AI")
            print("✅ Google ADK configuration is valid.")
        elif mode == "hybrid":
            # In hybrid mode at least one AI provider must be usable.
            key_found = False
            try:
                get_openrouter_key()
                print("✅ OpenRouter key found.")
                key_found = True
            except ValueError:
                pass  # OpenRouter is optional in hybrid mode
            try:
                google_config = get_google_genai_config()
                if not google_config['use_vertex_ai']:
                    get_google_api_key()
                    print("✅ Google ADK key found.")
                    key_found = True
                elif google_config['project_id']:
                    # BUGFIX: a Vertex AI setup needs no API key, but it is
                    # still an available Google provider and must count.
                    print("✅ Google ADK (Vertex AI) configuration found.")
                    key_found = True
            except ValueError:
                pass  # Google is optional in hybrid mode
            if not key_found:
                raise ValueError("In hybrid mode, at least one AI provider key (OpenRouter or Google) must be configured.")
            print("✅ Hybrid mode configuration is valid.")
        else:
            # BUGFIX: an unknown mode used to skip every branch and pass.
            raise ValueError(f"Unknown validation mode: {mode!r}")
        return True
    except ValueError as e:
        print(f"❌ Configuration validation failed: {e}")
        return False
if __name__ == "__main__":
    # Run the default (hybrid) validation when executed directly.
    print("🔧 Running configuration validation...")
    validate_config()

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env python3
"""
API健康检查模块
用于测试与外部服务的连接如OpenRouter和RapidAPI。
"""
import os
import requests
import sys
from pathlib import Path
# Add the project root to sys.path so the config package can be imported
# when this script is executed directly.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from config.settings import get_openrouter_key, get_rapidapi_key
def test_openrouter_api() -> bool:
    """Check connectivity and authentication against the OpenRouter API.

    Returns:
        True when the API answers 200 to an authenticated request.
    """
    # BUGFIX: get_openrouter_key() raises ValueError when the key is missing,
    # so the old falsiness check was unreachable and a missing key crashed
    # the health check instead of reporting failure.
    try:
        api_key = get_openrouter_key()
    except ValueError:
        api_key = None
    if not api_key:
        print("❌ OpenRouter API Key not found.")
        return False
    url = "https://openrouter.ai/api/v1/models"
    headers = {"Authorization": f"Bearer {api_key}"}
    try:
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            print("✅ OpenRouter API connection successful.")
            return True
        print(f"❌ OpenRouter API connection failed. Status: {response.status_code}, Response: {response.text[:100]}")
        return False
    except requests.RequestException as e:
        print(f"❌ OpenRouter API request failed: {e}")
        return False
def test_rapidapi_connection() -> bool:
    """Check connectivity and authentication against RapidAPI.

    Uses a simple Alpha Vantage endpoint; the free tier may return an
    application-level error, but as long as RapidAPI authentication
    succeeds the status is not 401/403.

    Returns:
        True when the request is not rejected with 401/403.
    """
    # BUGFIX: get_rapidapi_key() raises ValueError when the key is missing,
    # so the old falsiness check was unreachable and a missing key crashed
    # the health check instead of reporting failure.
    try:
        api_key = get_rapidapi_key()
    except ValueError:
        api_key = None
    if not api_key:
        print("❌ RapidAPI Key not found.")
        return False
    # A commonly-available RapidAPI endpoint used purely as an auth probe.
    url = "https://alpha-vantage.p.rapidapi.com/query"
    querystring = {"function": "TOP_GAINERS_LOSERS"}
    headers = {
        "x-rapidapi-host": "alpha-vantage.p.rapidapi.com",
        "x-rapidapi-key": api_key
    }
    try:
        response = requests.get(url, headers=headers, params=querystring, timeout=15)
        if response.status_code not in [401, 403]:
            print(f"✅ RapidAPI connection successful (Status: {response.status_code}).")
            return True
        print(f"❌ RapidAPI authentication failed. Status: {response.status_code}, Response: {response.text[:100]}")
        return False
    except requests.RequestException as e:
        print(f"❌ RapidAPI request failed: {e}")
        return False
if __name__ == "__main__":
    # Run both health checks when executed as a script.
    print("🩺 Running API Health Checks...")
    test_openrouter_api()
    test_rapidapi_connection()

View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Environment status check script
echo "📊 环境状态检查"
echo "=================="
# Git working-tree status
echo "Git 状态:"
git status --short
echo ""
# Configured remotes
echo "远程仓库状态:"
git remote -v
echo ""
# Local and remote branches
echo "分支状态:"
git branch -a
echo ""
# Newest five tags
echo "最新标签:"
git tag --sort=-version:refname | head -5
echo ""
# Recent commits
echo "最近提交:"
git log --oneline -5

View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Quick release script: tags main for the chosen environment and pushes
# the tag to that environment's remote.
VERSION=$1
ENV=$2
if [ -z "$VERSION" ] || [ -z "$ENV" ]; then
    echo "用法: ./quick-release.sh <版本号> <环境>"
    # BUGFIX: the usage text advertised dev/staging/prod, but the case
    # branches below actually accept canary/dev/beta.
    echo "环境选项: canary/dev/beta"
    exit 1
fi
case $ENV in
    canary)
        git checkout main
        git tag "v${VERSION}-canary"
        git push canary main --tags
        ;;
    dev)
        git checkout main
        git tag "v${VERSION}-dev"
        git push dev main --tags
        ;;
    beta)
        git checkout main
        git tag "v${VERSION}-beta"
        git push beta main --tags
        ;;
    *)
        echo "无效的环境选项: canary/dev/beta"
        exit 1
        ;;
esac
echo "✅ 发布完成: v${VERSION}-${ENV}"

View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Quick rollback script: hard-resets main to an environment tag and
# force-pushes. WARNING: destructive — rewrites the remote's history.
ENV=$1
VERSION=$2
if [ -z "$ENV" ] || [ -z "$VERSION" ]; then
    echo "用法: ./rollback.sh <环境> <版本号>"
    # BUGFIX: the usage text advertised staging/prod, but the case
    # branches below actually accept canary/dev/beta.
    echo "环境选项: canary/dev/beta"
    exit 1
fi
case $ENV in
    canary)
        git checkout main
        git reset --hard "v${VERSION}-canary"
        git push canary main --force
        ;;
    dev)
        git checkout main
        git reset --hard "v${VERSION}-dev"
        git push dev main --force
        ;;
    beta)
        git checkout main
        git reset --hard "v${VERSION}-beta"
        git push beta main --force
        ;;
    *)
        echo "无效的环境选项: canary/dev/beta"
        exit 1
        ;;
esac
echo "✅ 回滚完成: ${ENV} -> v${VERSION}"

View File

@@ -0,0 +1,229 @@
#!/bin/bash
# Liuren Shenjian progressive-release environment setup script
set -e
echo "🚀 配置渐进发布环境..."
# 1. Configure Git aliases to simplify common operations
echo "配置 Git 别名..."
git config alias.deploy-staging '!git push staging staging:main'
git config alias.deploy-prod '!git push origin main'
git config alias.sync-all '!git fetch --all && git push --all'
git config alias.release-start '!git checkout develop && git pull && git checkout -b release/'
git config alias.release-finish '!git checkout main && git merge staging && git tag -a'
# 2. Create the release branches (reuse them if they already exist)
echo "创建发布分支..."
git checkout -b staging 2>/dev/null || git checkout staging
git checkout -b develop 2>/dev/null || git checkout develop
# 3. Push the branches to every remote (failures tolerated)
echo "推送分支到所有远程仓库..."
git push origin staging:staging 2>/dev/null || true
git push origin develop:develop 2>/dev/null || true
git push staging staging:main 2>/dev/null || true
git push staging develop:develop 2>/dev/null || true
# 4. Branch protection needs admin rights, so only print instructions
echo "设置分支保护规则..."
echo "⚠️ 请在 GitHub/GitLab/Gitea 后台手动设置以下分支保护:"
echo "- main 分支:需要 PR 审查,禁止直接推送"
echo "- staging 分支:需要 PR 审查,禁止直接推送"
echo "- develop 分支:需要 PR 审查,禁止直接推送"
# 5. Create the release tag / commit-message template
#    (heredoc content is written to .gitmessage.txt verbatim)
echo "创建发布标签模板..."
cat > .gitmessage.txt << 'EOF'
# 发布标签模板
# 格式v主版本.次版本.修订版本-环境
#
# 示例:
# v1.2.0-canary (灰度发布)
# v1.2.0 (正式版本)
# v1.2.1-hotfix (热修复)
#
# 环境标识:
# -canary: 灰度发布
# -staging: 预发布测试
# -hotfix: 紧急修复
# 无后缀:正式版本
发布类型: [feature/bugfix/hotfix/docs]
影响范围: [core/api/ui/config]
测试状态: [passed/failed/pending]
回滚策略: [已准备/无需回滚]
EOF
git config commit.template .gitmessage.txt
# 6. Generate the quick-release helper script
#    NOTE(review): this generated variant targets dev/staging/prod remotes,
#    unlike the standalone canary/dev/beta script elsewhere — confirm which
#    set of environments is current.
cat > scripts/quick-release.sh << 'EOF'
#!/bin/bash
# 快速发布脚本
VERSION=$1
ENV=$2
if [ -z "$VERSION" ] || [ -z "$ENV" ]; then
    echo "用法: ./quick-release.sh <版本号> <环境>"
    echo "环境选项: dev/staging/prod"
    exit 1
fi
case $ENV in
    dev)
        git checkout develop
        git tag "v${VERSION}-dev"
        git push gitea develop --tags
        ;;
    staging)
        git checkout staging
        git tag "v${VERSION}-staging"
        git push staging staging:main --tags
        ;;
    prod)
        git checkout main
        git tag "v${VERSION}"
        git push origin main --tags
        ;;
    *)
        echo "无效的环境选项"
        exit 1
        ;;
esac
echo "✅ 发布完成: v${VERSION}-${ENV}"
EOF
chmod +x scripts/quick-release.sh
# 7. Generate the rollback helper script
cat > scripts/rollback.sh << 'EOF'
#!/bin/bash
# 快速回滚脚本
ENV=$1
VERSION=$2
if [ -z "$ENV" ] || [ -z "$VERSION" ]; then
    echo "用法: ./rollback.sh <环境> <版本号>"
    echo "环境选项: staging/prod"
    exit 1
fi
case $ENV in
    staging)
        git checkout staging
        git reset --hard "v${VERSION}-staging"
        git push staging staging:main --force
        ;;
    prod)
        git checkout main
        git reset --hard "v${VERSION}"
        git push origin main --force
        ;;
    *)
        echo "无效的环境选项"
        exit 1
        ;;
esac
echo "✅ 回滚完成: ${ENV} -> v${VERSION}"
EOF
chmod +x scripts/rollback.sh
# 8. Generate the status-check helper script
cat > scripts/check-status.sh << 'EOF'
#!/bin/bash
# 环境状态检查脚本
echo "📊 环境状态检查"
echo "=================="
# Git 状态
echo "Git 状态:"
git status --short
echo ""
# 远程仓库状态
echo "远程仓库状态:"
git remote -v
echo ""
# 分支状态
echo "分支状态:"
git branch -a
echo ""
# 最新标签
echo "最新标签:"
git tag --sort=-version:refname | head -5
echo ""
# 提交历史
echo "最近提交:"
git log --oneline -5
EOF
chmod +x scripts/check-status.sh
# 9. Create the GitHub Actions workflow directory
mkdir -p .github/workflows
# 10. Create the deploy-validation workflow
echo "创建部署验证..."
cat > .github/workflows/deploy-validation.yml << 'EOF'
name: Deploy Validation
on:
  push:
    branches: [develop, staging, main]
jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run tests
        run: |
          python -m pytest tests/ -v
      - name: Validate code style
        run: |
          pip install black flake8
          black --check .
          flake8 .
      - name: Security scan
        run: |
          pip install safety bandit
          safety check
          bandit -r . -f json -o security-report.json
EOF
echo "✅ 渐进发布环境配置完成!"
echo ""
echo "📋 使用指南:"
echo "1. 查看状态: ./scripts/check-status.sh"
echo "2. 快速发布: ./scripts/quick-release.sh 1.0.0 staging"
echo "3. 紧急回滚: ./scripts/rollback.sh prod 1.0.0"
echo "4. Git 别名: git deploy-staging, git deploy-prod"
echo ""
echo "📚 详细文档: docs/development/GRADUAL_DEPLOYMENT_PLAN.md"

View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Lianyaohu - virtual environment setup script
# Quickly initializes the project development environment
set -e  # exit on the first error
echo "🔧 开始设置炼妖壶项目虚拟环境..."
# Check the Python version
echo "📋 检查Python版本..."
python3 --version
# Create the virtual environment if it does not exist yet
if [ ! -d "venv" ]; then
    echo "🏗️ 创建虚拟环境..."
    python3 -m venv venv
else
    echo "✅ 虚拟环境已存在"
fi
# Activate the virtual environment
echo "🚀 激活虚拟环境..."
source venv/bin/activate
# Upgrade pip
echo "⬆️ 升级pip..."
pip install --upgrade pip
# Install the project dependencies
echo "📦 安装项目依赖..."
pip install -r requirements.txt
# Report the install status of key dependencies
echo "🔍 检查关键依赖安装状态..."
echo " - streamlit: $(pip show streamlit | grep Version || echo '未安装')"
echo " - openai: $(pip show openai | grep Version || echo '未安装')"
echo " - google-cloud-aiplatform: $(pip show google-cloud-aiplatform | grep Version || echo '未安装')"
echo " - aiohttp: $(pip show aiohttp | grep Version || echo '未安装')"
echo "✨ 虚拟环境设置完成!"
echo ""
echo "📝 使用说明:"
echo " 1. 激活虚拟环境: source venv/bin/activate"
echo " 2. 运行辩论系统: python examples/debates/baxian_adk_gemini_debate.py"
echo " 3. 启动Web界面: streamlit run app.py (如果有)"
echo " 4. 退出虚拟环境: deactivate"
echo ""
echo "🔧 环境变量配置:"
echo " 请确保 .env 文件中配置了必要的API密钥"
echo " - GOOGLE_API_KEY (Google Gemini API)"
echo " - GOOGLE_CLOUD_PROJECT_ID (GCP项目ID)"
echo " - GOOGLE_CLOUD_LOCATION (GCP区域)"
echo ""
echo "🎉 准备就绪开始你的AI辩论之旅吧"

View File

@@ -0,0 +1,68 @@
#!/bin/bash
# Memory Bank web UI launcher
# Sets up the environment automatically and starts the Streamlit app
echo "🧠 启动Memory Bank Web界面..."
echo "================================"
# Make sure we are running from the project directory
if [ ! -f "memory_bank_web_interface.py" ]; then
    echo "❌ 错误: 未找到memory_bank_web_interface.py文件"
    echo "请确保在正确的项目目录中运行此脚本"
    exit 1
fi
# Create the virtual environment if needed
if [ ! -d "venv" ]; then
    echo "📦 创建虚拟环境..."
    python3 -m venv venv
fi
# Activate the virtual environment
echo "🔧 激活虚拟环境..."
source venv/bin/activate
# Check and install dependencies
echo "📋 检查依赖包..."
# streamlit
if ! python -c "import streamlit" 2>/dev/null; then
    echo "📦 安装Streamlit..."
    pip install streamlit
fi
# Google Cloud dependencies
if ! python -c "import google.cloud" 2>/dev/null; then
    echo "📦 安装Google Cloud依赖..."
    pip install google-cloud-aiplatform google-generativeai
fi
# Other required dependencies
# NOTE(review): asyncio ships with the standard library, so this check can
# never fail on a working interpreter and the pip install is effectively dead.
if ! python -c "import asyncio" 2>/dev/null; then
    echo "📦 安装asyncio依赖..."
    pip install asyncio
fi
# Check Google Cloud application-default credentials
echo "🔐 检查Google Cloud认证..."
if ! gcloud auth application-default print-access-token >/dev/null 2>&1; then
    echo "⚠️ 未检测到Google Cloud认证"
    echo "正在启动认证流程..."
    gcloud auth application-default login
fi
# Export the environment variables the app expects
export GOOGLE_CLOUD_PROJECT="inner-radius-469712-e9"
export GOOGLE_CLOUD_REGION="us-central1"
# Start the Streamlit app
echo "🚀 启动Web界面..."
echo "================================"
echo "📱 Web界面将在浏览器中打开"
echo "🌐 默认地址: http://localhost:8501"
echo "⏹️ 按 Ctrl+C 停止服务"
echo "================================"
# Launch streamlit on localhost:8501
streamlit run memory_bank_web_interface.py --server.port 8501 --server.address localhost

View File

@@ -0,0 +1,137 @@
import glob
import frontmatter
import datetime
import argparse
from pathlib import Path
# --- Configuration ---
# Directories to exclude from scanning
EXCLUDE_DIRS = ['venv', 'node_modules', '.git']
# Default metadata template for the --fix option.
# NOTE(review): the dates are computed once at import time, so a
# long-running process would reuse import-day values — fine for a CLI run.
DEFAULT_METADATA_TEMPLATE = {
    'title': "Default Title",
    'status': "spring",
    'owner': "TBD",
    'created': datetime.date.today().strftime('%Y-%m-%d'),
    'review_by': (datetime.date.today() + datetime.timedelta(days=180)).strftime('%Y-%m-%d'),
    'tags': ["untagged"]
}
def get_project_files(project_root):
    """Return all Markdown file paths under *project_root* as strings,
    skipping any file inside an excluded (vendor) directory.

    Args:
        project_root: ``pathlib.Path`` root directory to scan recursively.

    Returns:
        List of matching file paths as strings.
    """
    # Comprehension replaces the manual append loop; a file is excluded when
    # any of its path components is one of the vendor directory names.
    return [
        str(file_path)
        for file_path in project_root.rglob('*.md')
        if not any(excluded_dir in file_path.parts for excluded_dir in EXCLUDE_DIRS)
    ]
def add_default_frontmatter(file_path):
    """Prepend the default YAML front matter block to *file_path*.

    The file is opened ``r+`` and rewritten from offset 0.  The serialized
    post is the default metadata plus the original content, so the new text
    is always at least as long as the old one and no truncation is needed.

    Returns:
        True on success, False if the file could not be rewritten.
    """
    try:
        with open(file_path, 'r+', encoding='utf-8') as f:
            content = f.read()
            f.seek(0, 0)
            # Create a new post object with default metadata and existing content
            new_post = frontmatter.Post(content, **DEFAULT_METADATA_TEMPLATE)
            # Write the serialized post (metadata + content) back to the file
            f.write(frontmatter.dumps(new_post))
        print(f"[FIXED] {file_path}: Added default front matter.")
        return True
    except Exception as e:
        print(f"[CRITICAL] {file_path}: Could not apply fix. Error: {e}")
        return False
def validate_doc_lifecycle(fix_missing=False):
    """
    Scans and validates markdown files, with an option to fix files missing front matter.

    Args:
        fix_missing: When True, files lacking YAML front matter get the
            default template written in place (via add_default_frontmatter).

    Returns:
        True when no errors were found (warnings do not fail the run).
    """
    # This script lives one level below the project root.
    project_root = Path(__file__).parent.parent
    markdown_files = get_project_files(project_root)
    print(f"Scanning {len(markdown_files)} Markdown files (vendor directories excluded)...")
    all_docs = []
    errors = []
    warnings = []
    fixed_count = 0
    for md_file in markdown_files:
        try:
            post = frontmatter.load(md_file)
            metadata = post.metadata
            if not metadata:
                # No front matter at all: either repair it or warn and skip.
                if fix_missing:
                    if add_default_frontmatter(md_file):
                        fixed_count += 1
                else:
                    warnings.append(f"[SKIPPED] {md_file}: No YAML front matter found. Use --fix to add a template.")
                continue
            doc_info = {'path': md_file}
            required_fields = ['title', 'status', 'owner', 'created', 'review_by']
            missing_fields = [field for field in required_fields if field not in metadata]
            if missing_fields:
                errors.append(f"[ERROR] {md_file}: Missing required fields: {', '.join(missing_fields)}")
                continue
            doc_info.update(metadata)
            # Lifecycle states use a seasonal naming scheme.
            allowed_statuses = ['spring', 'summer', 'autumn', 'winter']
            if metadata.get('status') not in allowed_statuses:
                errors.append(f"[ERROR] {md_file}: Invalid status '{metadata.get('status')}'. Must be one of {allowed_statuses}")
            review_by_date = metadata.get('review_by')
            if review_by_date:
                # The YAML parser may hand back either a string or a date.
                if isinstance(review_by_date, str):
                    review_by_date = datetime.datetime.strptime(review_by_date, '%Y-%m-%d').date()
                if review_by_date < datetime.date.today():
                    warnings.append(f"[WARNING] {md_file}: Review date ({review_by_date}) has passed.")
            all_docs.append(doc_info)
        except Exception as e:
            errors.append(f"[CRITICAL] {md_file}: Could not parse file. Error: {e}")
    # Human-readable report: warnings first, then errors, then totals.
    print("\n--- Validation Report ---")
    if not errors and not warnings:
        print("✅ All documents with front matter are valid and up-to-date.")
    if warnings:
        print("\n⚠️ Warnings:")
        for warning in warnings:
            print(warning)
    if errors:
        print("\n❌ Errors:")
        for error in errors:
            print(error)
    print(f"\n--- Summary ---")
    print(f"Total files scanned: {len(markdown_files)}")
    print(f"Files with valid front matter: {len(all_docs)}")
    if fix_missing:
        print(f"Files automatically fixed: {fixed_count}")
    print(f"Warnings: {len(warnings)}")
    print(f"Errors: {len(errors)}")
    return len(errors) == 0
if __name__ == "__main__":
    # CLI entry point: validate all project Markdown docs, optionally fixing
    # files that lack front matter, and exit non-zero on validation errors.
    parser = argparse.ArgumentParser(description="Validate and manage the lifecycle of Markdown documents.")
    parser.add_argument(
        '--fix',
        action='store_true',
        help="Automatically add a default front matter template to any document that is missing one."
    )
    args = parser.parse_args()
    is_valid = validate_doc_lifecycle(fix_missing=args.fix)
    if not is_valid:
        # BUGFIX: use SystemExit instead of the site-injected exit() helper,
        # which is meant for interactive use and is absent under `python -S`.
        raise SystemExit(1)

View File

@@ -0,0 +1,138 @@
// 查询术数书内容的脚本
// 通过 Hyperdrive API 查询 NeonDB 中的术数书数据
const API_BASE_URL = 'https://hyperdrive.seekkey.tech';
// Shared request helper: performs a fetch against the Hyperdrive API,
// throws on non-2xx responses, and decodes JSON when the server says so.
async function apiRequest(endpoint, options = {}) {
    const url = `${API_BASE_URL}${endpoint}`;
    const headers = {
        'Content-Type': 'application/json',
        ...options.headers
    };
    try {
        const response = await fetch(url, { ...options, headers });
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${response.statusText}`);
        }
        const contentType = response.headers.get('content-type');
        const isJson = contentType && contentType.includes('application/json');
        return isJson ? await response.json() : await response.text();
    } catch (error) {
        console.error(`Request failed for ${endpoint}:`, error.message);
        throw error;
    }
}
// Fetch the database table structure; returns null on failure.
async function queryTables() {
    console.log('\n📋 查询数据库表结构...');
    let result = null;
    try {
        result = await apiRequest('/query-tables');
        console.log('✅ 数据库表:', result);
    } catch (error) {
        console.log('❌ 查询表结构失败:', error.message);
    }
    return result;
}
// Fetch up to `limit` entries of book content; returns null on failure.
async function queryShushuBook(limit = 10) {
    console.log('\n📚 查询术数书内容...');
    let result = null;
    try {
        result = await apiRequest(`/query-shushu?limit=${limit}`);
        console.log('✅ 术数书内容:', JSON.stringify(result, null, 2));
    } catch (error) {
        console.log('❌ 查询术数书失败:', error.message);
    }
    return result;
}
// Keyword search over the book content; returns null on failure.
async function searchShushuBook(keyword, limit = 5) {
    console.log(`\n🔍 搜索术数书内容: "${keyword}"...`);
    let result = null;
    try {
        result = await apiRequest(`/search-shushu?q=${encodeURIComponent(keyword)}&limit=${limit}`);
        console.log('✅ 搜索结果:', JSON.stringify(result, null, 2));
    } catch (error) {
        console.log('❌ 搜索失败:', error.message);
    }
    return result;
}
// Fetch aggregate statistics about the book data; returns null on failure.
async function getShushuStats() {
    console.log('\n📊 获取术数书统计信息...');
    let result = null;
    try {
        result = await apiRequest('/shushu-stats');
        console.log('✅ 统计信息:', JSON.stringify(result, null, 2));
    } catch (error) {
        console.log('❌ 获取统计信息失败:', error.message);
    }
    return result;
}
// Main entry point: verify connectivity first, then run each query helper.
async function main() {
    console.log('🚀 术数书查询脚本');
    console.log('==================');
    // Abort early when the Hyperdrive endpoint is unreachable.
    console.log('\n🔗 测试 Hyperdrive 连接...');
    try {
        const connectionTest = await apiRequest('/test-connection');
        console.log('✅ 连接成功:', connectionTest.message);
    } catch (error) {
        console.log('❌ 连接失败:', error.message);
        return;
    }
    // Table structure
    await queryTables();
    // Aggregate statistics
    await getShushuStats();
    // Sample of the book content
    await queryShushuBook(5);
    // Example keyword searches
    await searchShushuBook('易经');
    await searchShushuBook('八卦');
    await searchShushuBook('太公');
}
// 如果是 Node.js 环境,导入 fetch
if (typeof window === 'undefined') {
// Node.js 环境
const { default: fetch } = require('node-fetch');
global.fetch = fetch;
main().catch(console.error);
} else {
// 浏览器环境
console.log('在浏览器控制台中运行: main()');
}
// 导出函数供其他模块使用
if (typeof module !== 'undefined' && module.exports) {
module.exports = {
queryTables,
queryShushuBook,
searchShushuBook,
getShushuStats,
main
};
}

View File

@@ -0,0 +1,107 @@
// Simple configuration validation script
// This validates the wrangler.toml and Worker code without requiring API access
const fs = require('fs');
const path = require('path');
console.log('🔍 Validating Hyperdrive Configuration Files');
console.log('============================================');
// Check wrangler.toml
console.log('\n📋 Checking wrangler.toml...');
try {
    const wranglerContent = fs.readFileSync('wrangler.toml', 'utf8');
    console.log('✅ wrangler.toml exists');
    // Check for required fields via regex against the raw TOML text.
    // NOTE(review): the per-check log lines below print without ✅/❌
    // markers, unlike the rest of this script — confirm whether the
    // markers were lost in a copy/paste.
    const checks = [
        { field: 'name', regex: /name\s*=\s*["']([^"']+)["']/, required: true },
        { field: 'main', regex: /main\s*=\s*["']([^"']+)["']/, required: true },
        { field: 'compatibility_date', regex: /compatibility_date\s*=\s*["']([^"']+)["']/, required: true },
        { field: 'nodejs_compat', regex: /nodejs_compat/, required: true },
        { field: 'hyperdrive binding', regex: /binding\s*=\s*["']HYPERDRIVE["']/, required: true },
        { field: 'hyperdrive id', regex: /id\s*=\s*["']ef43924d89064cddabfaccf06aadfab6["']/, required: true }
    ];
    checks.forEach(check => {
        if (check.regex.test(wranglerContent)) {
            console.log(`${check.field} configured`);
        } else {
            console.log(`${check.field} missing or incorrect`);
        }
    });
} catch (error) {
    console.log('❌ wrangler.toml not found or unreadable');
}
// Check Worker code
console.log('\n📝 Checking Worker code...');
try {
    const workerContent = fs.readFileSync('src/index.ts', 'utf8');
    console.log('✅ src/index.ts exists');
    // Heuristic source checks for the features the Worker must implement.
    const codeChecks = [
        { name: 'Hyperdrive binding usage', regex: /env\.HYPERDRIVE/ },
        { name: 'Test connection endpoint', regex: /\/test-connection/ },
        { name: 'Test query endpoint', regex: /\/test-query/ },
        { name: 'PostgreSQL import', regex: /pg/ },
        { name: 'Error handling', regex: /try\s*{[\s\S]*catch/ }
    ];
    codeChecks.forEach(check => {
        if (check.regex.test(workerContent)) {
            console.log(`${check.name} implemented`);
        } else {
            console.log(` ⚠️ ${check.name} not found`);
        }
    });
} catch (error) {
    console.log('❌ src/index.ts not found or unreadable');
}
// Check package.json
console.log('\n📦 Checking package.json...');
try {
    const packageContent = fs.readFileSync('package.json', 'utf8');
    const packageJson = JSON.parse(packageContent);
    console.log('✅ package.json exists and is valid JSON');
    // Expected runtime and dev dependencies for the Hyperdrive Worker.
    const deps = {
        'pg': packageJson.dependencies?.pg,
        '@cloudflare/workers-types': packageJson.devDependencies?.['@cloudflare/workers-types'],
        '@types/pg': packageJson.devDependencies?.['@types/pg'],
        'typescript': packageJson.devDependencies?.typescript,
        'wrangler': packageJson.devDependencies?.wrangler
    };
    Object.entries(deps).forEach(([dep, version]) => {
        if (version) {
            console.log(`${dep}: ${version}`);
        } else {
            console.log(`${dep}: not found`);
        }
    });
} catch (error) {
    console.log('❌ package.json not found or invalid JSON');
}
console.log('\n📊 Configuration Summary:');
console.log(' - Project: hyperdrive-neondb-test');
console.log(' - Hyperdrive ID: ef43924d89064cddabfaccf06aadfab6');
console.log(' - Database: NeonDB (PostgreSQL)');
console.log(' - Binding: HYPERDRIVE');
console.log(' - Compatibility: nodejs_compat enabled');
console.log('\n🚀 Next Steps:');
console.log(' 1. Ensure you have proper Cloudflare API permissions');
console.log(' 2. Verify the Hyperdrive configuration exists in your Cloudflare dashboard');
console.log(' 3. Deploy with: wrangler deploy');
console.log(' 4. Test endpoints after deployment');
console.log('\n💡 Troubleshooting:');
console.log(' - If API token has insufficient permissions, use: wrangler login');
console.log(' - Check Hyperdrive exists: https://dash.cloudflare.com/[account-id]/workers/hyperdrive');
console.log(' - Verify NeonDB connection string is correct in Hyperdrive config');

View File

@@ -0,0 +1,16 @@
name = "hyperdrive-neondb-test"
main = "src/index.ts"
compatibility_date = "2025-02-04"
# Add nodejs_compat compatibility flag to support common database drivers
compatibility_flags = ["nodejs_compat"]
[observability]
enabled = true
# Hyperdrive configuration for NeonDB
[[hyperdrive]]
binding = "HYPERDRIVE"
id = "ef43924d89064cddabfaccf06aadfab6"
# For local development, use a local PostgreSQL connection
localConnectionString = "postgresql://postgres:password@localhost:5432/testdb"

View File

@@ -0,0 +1,150 @@
#!/usr/bin/env python3
"""
详细查看和测试Vertex AI Memory Bank功能
"""
import sys
import os
import asyncio
import json
from datetime import datetime
sys.path.append('src')
from jixia.memory.factory import get_memory_backend
from config.doppler_config import get_google_genai_config
async def test_memory_bank_functionality():
    """Walk through the Memory Bank backend end-to-end for one agent.

    Steps: create/fetch the bank, add four memory types, run searches,
    fetch the aggregated agent context, then print per-type counts.
    All results are printed; failures are reported with a traceback
    instead of being raised.
    """
    print("🧠 详细测试Memory Bank功能")
    print("=" * 60)
    # Load Google GenAI configuration (project id / location).
    config = get_google_genai_config()
    project_id = config.get('project_id')
    location = config.get('location', 'us-central1')
    print(f"📊 项目ID: {project_id}")
    print(f"📍 位置: {location}")
    print(f"🕐 测试时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print()
    try:
        # Obtain the Memory Bank backend from the project factory.
        memory_backend = get_memory_backend()
        print(f"✅ Memory Bank后端: {type(memory_backend).__name__}")
        print()
        # Use a single agent for the detailed walkthrough.
        test_agent = "lvdongbin"
        print(f"🧙‍♂️ 测试智能体: {test_agent} (吕洞宾)")
        print("-" * 40)
        # 1. Create (or fetch) the agent's Memory Bank.
        print("1⃣ 创建Memory Bank...")
        memory_bank_id = await memory_backend.create_memory_bank(
            agent_name=test_agent,
            display_name=f"测试Memory Bank - {test_agent}"
        )
        print(f" ✅ Memory Bank ID: {memory_bank_id}")
        print()
        # 2. Add memories of several different types.
        print("2⃣ 添加测试记忆...")
        # Conversation memory.
        conversation_memory = await memory_backend.add_memory(
            agent_name=test_agent,
            content="在关于AI伦理的辩论中我强调了技术发展应该以人为本不能忽视道德考量。",
            memory_type="conversation",
            debate_topic="AI伦理与技术发展",
            metadata={"opponent": "铁拐李", "stance": "支持伦理优先"}
        )
        print(f" 📝 对话记忆: {conversation_memory}")
        # Preference memory.
        preference_memory = await memory_backend.add_memory(
            agent_name=test_agent,
            content="我偏好使用古典哲学的智慧来论证现代问题,特别是道家思想。",
            memory_type="preference",
            metadata={"philosophy": "道家", "style": "古典智慧"}
        )
        print(f" ⚙️ 偏好记忆: {preference_memory}")
        # Knowledge memory.
        knowledge_memory = await memory_backend.add_memory(
            agent_name=test_agent,
            content="区块链技术的核心是去中心化和不可篡改性,这与道家'无为而治'的理念有相通之处。",
            memory_type="knowledge",
            debate_topic="区块链技术应用",
            metadata={"domain": "技术", "connection": "哲学"}
        )
        print(f" 📚 知识记忆: {knowledge_memory}")
        # Strategy memory.
        strategy_memory = await memory_backend.add_memory(
            agent_name=test_agent,
            content="在辩论中,当对手使用激进论点时,我会用温和的反问来引导思考,而不是直接对抗。",
            memory_type="strategy",
            metadata={"tactic": "温和引导", "effectiveness": ""}
        )
        print(f" 🎯 策略记忆: {strategy_memory}")
        print()
        # 3. Search the stored memories.
        print("3⃣ 测试记忆搜索...")
        # Search memories about AI.
        ai_memories = await memory_backend.search_memories(
            agent_name=test_agent,
            query="AI 人工智能 伦理",
            limit=5
        )
        print(f" 🔍 搜索'AI 人工智能 伦理': 找到 {len(ai_memories)} 条记忆")
        for i, memory in enumerate(ai_memories, 1):
            print(f" {i}. {memory.get('content', '')[:50]}...")
        print()
        # Search strategy-type memories only.
        strategy_memories = await memory_backend.search_memories(
            agent_name=test_agent,
            query="辩论 策略",
            memory_type="strategy",
            limit=3
        )
        print(f" 🎯 搜索策略记忆: 找到 {len(strategy_memories)} 条记忆")
        for i, memory in enumerate(strategy_memories, 1):
            print(f" {i}. {memory.get('content', '')[:50]}...")
        print()
        # 4. Fetch the aggregated agent context for a debate topic.
        print("4⃣ 获取智能体上下文...")
        context = await memory_backend.get_agent_context(
            agent_name=test_agent,
            debate_topic="AI伦理与技术发展"
        )
        print(f" 📋 上下文长度: {len(context)} 字符")
        print(f" 📋 上下文预览: {context[:200]}...")
        print()
        # 5. Per-type memory counts (empty query appears to match all — TODO confirm backend semantics).
        print("5⃣ 记忆统计...")
        memory_types = ["conversation", "preference", "knowledge", "strategy"]
        for mem_type in memory_types:
            memories = await memory_backend.search_memories(
                agent_name=test_agent,
                query="",
                memory_type=mem_type,
                limit=100
            )
            print(f" 📊 {mem_type}: {len(memories)} 条记忆")
        print()
        print("🎉 Memory Bank功能测试完成!")
        print("=" * 60)
    except Exception as e:
        # Best-effort script: report the failure with a traceback, don't re-raise.
        print(f"❌ 测试失败: {e}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
    # Script entry point: run the async walkthrough.
    asyncio.run(test_memory_bank_functionality())

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""
使用项目现有的Memory Bank代码来查看实例
"""
import sys
import os
import asyncio
sys.path.append('src')
from jixia.memory.factory import get_memory_backend
from config.doppler_config import get_google_genai_config
async def list_memory_banks():
    """List the Memory Bank instances of the Eight Immortals via the project factory.

    For each agent: create/fetch its bank and probe for existing memories.
    Everything is printed; on top-level failure, environment diagnostics
    are dumped instead of raising.
    """
    print("🧠 使用项目Memory Bank工厂查看实例")
    print("="*50)
    try:
        # Load configuration and show the key fields.
        config = get_google_genai_config()
        print(f"📊 项目ID: {config.get('project_id')}")
        print(f"📍 位置: {config.get('location')}")
        print(f"🔑 Memory Bank启用: {config.get('memory_bank_enabled')}")
        # Obtain the Memory Bank backend.
        print("\n🔍 正在获取Memory Bank后端...")
        memory_backend = get_memory_backend()
        print(f"✅ 成功获取Memory Bank后端: {type(memory_backend).__name__}")
        # The Eight Immortals agent ids used by this script.
        immortals = [
            "tieguaili", "zhongliquan", "lvdongbin", "hehe_erxian",
            "lantsaihe", "hanxiangzi", "caoguo_jiu", "hexiangu"
        ]
        print(f"\n🔍 正在检查八仙的Memory Bank实例...")
        print("="*50)
        for immortal in immortals:
            try:
                print(f"\n🧙‍♂️ {immortal}:")
                # Create (or fetch) this immortal's Memory Bank.
                memory_bank_id = await memory_backend.create_memory_bank(immortal)
                print(f" ✅ Memory Bank ID: {memory_bank_id}")
                # Probe for existing memories (best effort).
                try:
                    memories = await memory_backend.search_memories(immortal, "投资", limit=3)
                    if memories:
                        print(f" 📝 找到 {len(memories)} 条记忆")
                        for i, memory in enumerate(memories[:2], 1):
                            content = memory.get('content', '无内容')[:50]
                            print(f" {i}. {content}...")
                    else:
                        print(f" 📭 暂无记忆")
                except Exception as e:
                    print(f" ⚠️ 无法搜索记忆: {str(e)[:50]}...")
            except Exception as e:
                print(f" ❌ 错误: {str(e)[:50]}...")
        print(f"\n🎉 Memory Bank检查完成!")
    except Exception as e:
        # Top-level failure: print diagnostics instead of raising.
        print(f"\n❌ 主要错误: {str(e)}")
        print(f"🔧 错误类型: {type(e).__name__}")
        # Debug information to help localize environment problems.
        print("\n🔍 调试信息:")
        print(f" Python路径: {sys.path[:3]}...")
        print(f" 当前目录: {os.getcwd()}")
        print(f" 环境变量:")
        for key in ['GOOGLE_API_KEY', 'GOOGLE_CLOUD_PROJECT_ID', 'VERTEX_MEMORY_BANK_ENABLED']:
            value = os.getenv(key, '未设置')
            # Mask API keys before printing.
            if 'API_KEY' in key and value != '未设置':
                value = value[:10] + '...' if len(value) > 10 else value
            print(f" {key}: {value}")
if __name__ == "__main__":
    # Script entry point: run the async listing.
    asyncio.run(list_memory_banks())

View File

@@ -0,0 +1,299 @@
#!/usr/bin/env python3
"""
Vertex AI Memory Bank Web界面
一个简单的Streamlit应用用于通过Web界面访问和管理Memory Bank
"""
import streamlit as st
import asyncio
import sys
import os
from datetime import datetime
# 添加项目路径
sys.path.append('/Users/ben/liurenchaxin/src')
try:
from jixia.memory.factory import get_memory_backend
except ImportError as e:
st.error(f"无法导入jixia模块: {e}")
st.info("请确保已激活虚拟环境并安装了所需依赖")
st.stop()
# Streamlit page configuration.
st.set_page_config(
    page_title="Memory Bank 管理界面",
    page_icon="🧠",
    layout="wide"
)
# Page title.
st.title("🧠 Vertex AI Memory Bank 管理界面")
st.markdown("---")
# Sidebar configuration inputs.
# NOTE(review): these values are displayed but never passed to the backend
# below — confirm whether they should be wired in.
st.sidebar.header("配置")
project_id = st.sidebar.text_input("项目ID", value="inner-radius-469712-e9")
location = st.sidebar.text_input("区域", value="us-central1")
# The Eight Immortals agent ids.
# NOTE(review): spellings differ from sibling scripts ("zhongliquan"/"hexiangu"
# elsewhere vs "zhonghanli"/"hesengu" here), and "zhonghanli"/"hanzhongli"
# look like duplicates of the same immortal — confirm the canonical ids.
EIGHT_IMMORTALS = [
    "lvdongbin", "tieguaili", "hanxiangzi", "lanzaihe",
    "hesengu", "zhonghanli", "caogujiu", "hanzhongli"
]
# 缓存Memory Bank后端
@st.cache_resource
def get_memory_backend_cached():
    """Build and cache the (backend, event-loop) pair used by this app.

    A dedicated event loop is created once and cached together with the
    backend so later coroutines can run on the same loop.  Returns
    (None, None) and shows a Streamlit error if initialization fails.
    """
    try:
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
        mem_backend = event_loop.run_until_complete(get_memory_backend("vertex"))
    except Exception as exc:
        st.error(f"初始化Memory Bank失败: {exc}")
        return None, None
    return mem_backend, event_loop
# 异步函数包装器
def run_async(coro):
    """Run *coro* on the cached event loop; return None on any failure."""
    cached_backend, cached_loop = get_memory_backend_cached()
    if cached_backend is None:
        # Backend never initialized — nothing to run on.
        return None
    try:
        result = cached_loop.run_until_complete(coro)
    except Exception as exc:
        st.error(f"操作失败: {exc}")
        return None
    return result
# Main UI: four tabs.
tab1, tab2, tab3, tab4 = st.tabs(["📋 Memory Bank列表", "🔍 搜索记忆", " 添加记忆", "📊 统计信息"])
with tab1:
    st.header("Memory Bank 实例列表")
    if st.button("🔄 刷新列表", key="refresh_list"):
        st.rerun()
    # Show each immortal's Memory Bank status in a 4-column grid.
    cols = st.columns(4)
    for i, immortal in enumerate(EIGHT_IMMORTALS):
        with cols[i % 4]:
            with st.container():
                st.subheader(f"🧙‍♂️ {immortal}")
                # Check Memory Bank status for this agent.
                backend, _ = get_memory_backend_cached()
                if backend:
                    try:
                        # A non-None agent context is taken to mean the bank exists.
                        context = run_async(backend.get_agent_context(immortal))
                        if context is not None:
                            st.success("✅ 活跃")
                            # Show the memory count (empty query, capped at 100).
                            memories = run_async(backend.search_memories(immortal, "", limit=100))
                            if memories:
                                st.info(f"📝 记忆数量: {len(memories)}")
                            else:
                                st.info("📝 记忆数量: 0")
                        else:
                            st.warning("⚠️ 未初始化")
                    except Exception as e:
                        st.error(f"❌ 错误: {str(e)[:50]}...")
                else:
                    st.error("❌ 连接失败")
with tab2:
    st.header("🔍 搜索记忆")
    col1, col2 = st.columns([1, 2])
    with col1:
        # Search controls.
        selected_agent = st.selectbox("选择Agent", EIGHT_IMMORTALS, key="search_agent")
        search_query = st.text_input("搜索关键词", placeholder="输入要搜索的内容...", key="search_query")
        search_limit = st.slider("结果数量", 1, 50, 10, key="search_limit")
        if st.button("🔍 搜索", key="search_button"):
            if search_query:
                with st.spinner("搜索中..."):
                    backend, _ = get_memory_backend_cached()
                    if backend:
                        memories = run_async(backend.search_memories(selected_agent, search_query, limit=search_limit))
                        # Stash results in session state so they survive Streamlit reruns.
                        st.session_state['search_results'] = memories
                        st.session_state['search_agent'] = selected_agent
                        st.session_state['search_query'] = search_query
            else:
                st.warning("请输入搜索关键词")
    with col2:
        st.subheader("搜索结果")
        if 'search_results' in st.session_state and st.session_state['search_results']:
            st.success(f"找到 {len(st.session_state['search_results'])} 条记忆")
            # One collapsible panel per result.
            for i, memory in enumerate(st.session_state['search_results']):
                with st.expander(f"记忆 {i+1}: {memory.get('content', 'N/A')[:50]}..."):
                    st.write(f"**内容**: {memory.get('content', 'N/A')}")
                    st.write(f"**类型**: {memory.get('memory_type', 'N/A')}")
                    st.write(f"**时间**: {memory.get('timestamp', 'N/A')}")
                    if 'metadata' in memory:
                        st.write(f"**元数据**: {memory['metadata']}")
        elif 'search_results' in st.session_state:
            # A search ran but matched nothing.
            st.info("未找到匹配的记忆")
        else:
            st.info("请执行搜索以查看结果")
with tab3:
    st.header(" 添加记忆")
    col1, col2 = st.columns([1, 1])
    with col1:
        # Inputs for the new memory.
        add_agent = st.selectbox("选择Agent", EIGHT_IMMORTALS, key="add_agent")
        memory_type = st.selectbox("记忆类型", ["conversation", "preference", "knowledge", "strategy"], key="memory_type")
        memory_content = st.text_area("记忆内容", placeholder="输入要添加的记忆内容...", height=150, key="memory_content")
        # Optional metadata inputs.
        st.subheader("元数据(可选)")
        importance = st.slider("重要性", 1, 10, 5, key="importance")
        tags = st.text_input("标签(用逗号分隔)", placeholder="标签1, 标签2, 标签3", key="tags")
        if st.button(" 添加记忆", key="add_memory_button"):
            if memory_content:
                with st.spinner("添加记忆中..."):
                    backend, _ = get_memory_backend_cached()
                    if backend:
                        # Assemble metadata for the new memory.
                        metadata = {
                            "importance": importance,
                            "timestamp": datetime.now().isoformat(),
                            "source": "web_interface"
                        }
                        if tags:
                            metadata["tags"] = [tag.strip() for tag in tags.split(",")]
                        # Persist the memory via the backend.
                        # NOTE(review): keyword is agent_id here but agent_name in
                        # sibling scripts — confirm the backend's parameter name.
                        success = run_async(backend.add_memory(
                            agent_id=add_agent,
                            content=memory_content,
                            memory_type=memory_type,
                            metadata=metadata
                        ))
                        if success:
                            st.success("✅ 记忆添加成功!")
                            # Clear the inputs after a successful add.
                            st.session_state['memory_content'] = ""
                            st.session_state['tags'] = ""
                        else:
                            st.error("❌ 添加记忆失败")
            else:
                st.warning("请输入记忆内容")
    with col2:
        # Live preview of what will be stored.
        st.subheader("添加记忆预览")
        if memory_content:
            st.info(f"**Agent**: {add_agent}")
            st.info(f"**类型**: {memory_type}")
            st.info(f"**内容**: {memory_content}")
            st.info(f"**重要性**: {importance}/10")
            if tags:
                st.info(f"**标签**: {tags}")
        else:
            st.info("输入记忆内容以查看预览")
with tab4:
    st.header("📊 统计信息")
    if st.button("🔄 刷新统计", key="refresh_stats"):
        st.rerun()
    # Collect per-agent statistics.
    backend, _ = get_memory_backend_cached()
    if backend:
        stats_data = []
        for immortal in EIGHT_IMMORTALS:
            try:
                # Memory count (empty query, capped at 1000).
                memories = run_async(backend.search_memories(immortal, "", limit=1000))
                memory_count = len(memories) if memories else 0
                # Agent context presence determines the status label.
                context = run_async(backend.get_agent_context(immortal))
                status = "活跃" if context else "未初始化"
                stats_data.append({
                    "Agent": immortal,
                    "状态": status,
                    "记忆数量": memory_count,
                    "最后更新": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                })
            except Exception as e:
                # Record the failure as a row rather than aborting the table.
                stats_data.append({
                    "Agent": immortal,
                    "状态": "错误",
                    "记忆数量": 0,
                    "最后更新": f"错误: {str(e)[:30]}..."
                })
        # Statistics table.
        st.dataframe(stats_data, use_container_width=True)
        # Summary metrics across all agents.
        col1, col2, col3, col4 = st.columns(4)
        total_agents = len(EIGHT_IMMORTALS)
        active_agents = sum(1 for item in stats_data if item["状态"] == "活跃")
        total_memories = sum(item["记忆数量"] for item in stats_data)
        avg_memories = total_memories / total_agents if total_agents > 0 else 0
        with col1:
            st.metric("总Agent数", total_agents)
        with col2:
            st.metric("活跃Agent数", active_agents)
        with col3:
            st.metric("总记忆数", total_memories)
        with col4:
            st.metric("平均记忆数", f"{avg_memories:.1f}")
# Footer.
st.markdown("---")
st.markdown(
    """
    <div style='text-align: center; color: #666;'>
    🧠 Vertex AI Memory Bank Web界面 |
    <a href='https://cloud.google.com/vertex-ai/generative-ai/docs/agent-engine/memory-bank/overview' target='_blank'>官方文档</a>
    </div>
    """,
    unsafe_allow_html=True
)
# Usage notes (collapsed by default).
with st.expander("📖 使用说明"):
    st.markdown("""
    ### 功能说明
    1. **Memory Bank列表**: 查看所有八仙角色的Memory Bank状态和记忆数量
    2. **搜索记忆**: 在指定Agent的记忆中搜索特定内容
    3. **添加记忆**: 为Agent添加新的记忆支持不同类型和元数据
    4. **统计信息**: 查看所有Agent的统计数据和汇总信息
    ### 使用前准备
    1. 确保已激活虚拟环境: `source venv/bin/activate`
    2. 确保已设置Google Cloud认证: `gcloud auth application-default login`
    3. 运行此界面: `streamlit run memory_bank_web_interface.py`
    ### 注意事项
    - Memory Bank目前仅在us-central1区域可用
    - 搜索功能支持模糊匹配
    - 添加的记忆会立即生效
    - 统计信息实时更新
    """)

View File

@@ -0,0 +1,154 @@
#!/usr/bin/env python3
"""
通过Google Cloud Console查看Memory Bank资源
"""
import sys
import os
import asyncio
import json
import subprocess
from datetime import datetime
sys.path.append('src')
from config.doppler_config import get_google_genai_config
def get_access_token():
    """Return a Google Cloud access token via the gcloud CLI, or None on failure."""
    cmd = ['gcloud', 'auth', 'print-access-token']
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        # gcloud not logged in / not installed with a non-zero exit.
        print(f"❌ 获取访问令牌失败: {e}")
        return None
    return proc.stdout.strip()
def make_api_request(url, token):
    """Issue an authenticated GET request and return (status_code, payload).

    Args:
        url: Full endpoint URL.
        token: OAuth2 bearer access token.

    Returns:
        (status_code, parsed_json) when the body is JSON;
        (status_code, {}) when the body is empty or not JSON;
        (None, error_string) when the request itself fails
        (connection error, timeout, ...).
    """
    import requests  # local import keeps the dependency scoped to this helper
    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    try:
        # Bound the request so a stuck endpoint can't hang the script forever.
        response = requests.get(url, headers=headers, timeout=15)
    except Exception as e:
        return None, str(e)
    if not response.content:
        return response.status_code, {}
    try:
        return response.status_code, response.json()
    except ValueError:
        # Non-JSON body (e.g. an HTML error page): keep the real status code
        # instead of discarding it as the old broad except did.
        return response.status_code, {}
def main():
    """Probe GCP API endpoints for Memory Bank resources and print findings.

    Acquires a gcloud access token, tests a list of candidate Vertex AI /
    Generative Language endpoints, then prints project metadata, enabled
    AI-related services, and console links.
    """
    print("🔍 通过GCP API查看Memory Bank资源")
    print("=" * 60)
    # Load configuration.
    config = get_google_genai_config()
    project_id = config.get('project_id')
    location = config.get('location', 'us-central1')
    print(f"📊 项目ID: {project_id}")
    print(f"📍 位置: {location}")
    print(f"🕐 查询时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print()
    # Acquire an access token via the gcloud CLI.
    print("🔑 获取访问令牌...")
    token = get_access_token()
    if not token:
        print("❌ 无法获取访问令牌")
        return
    print(f"✅ 访问令牌: {token[:20]}...")
    print()
    # Candidate endpoints to probe.
    api_endpoints = [
        # Vertex AI API
        f"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/operations",
        f"https://aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/operations",
        # Generative Language API
        f"https://generativelanguage.googleapis.com/v1beta/projects/{project_id}/locations/{location}/operations",
        # Possible Memory Bank endpoints
        f"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/{location}/memoryBanks",
        f"https://aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/memoryBanks",
        # Other candidate endpoints
        f"https://generativelanguage.googleapis.com/v1beta/projects/{project_id}/locations/{location}/memoryBanks",
        f"https://generativelanguage.googleapis.com/v1/projects/{project_id}/locations/{location}/memoryBanks",
    ]
    print("🌐 测试API端点...")
    print("-" * 40)
    for i, endpoint in enumerate(api_endpoints, 1):
        print(f"{i}. 测试: {endpoint.split('/')[-2]}/{endpoint.split('/')[-1]}")
        status_code, response = make_api_request(endpoint, token)
        if status_code == 200:
            print(f" ✅ 成功 (200): 找到 {len(response.get('operations', response.get('memoryBanks', [])))} 个资源")
            if response:
                print(f" 📄 响应预览: {str(response)[:100]}...")
        elif status_code == 404:
            print(f" ⚠️ 未找到 (404): 端点不存在")
        elif status_code == 403:
            print(f" 🚫 权限不足 (403): 需要更多权限")
        elif status_code:
            print(f" ❌ 错误 ({status_code}): {str(response)[:50]}...")
        else:
            # status_code is None: the request itself failed.
            print(f" 💥 请求失败: {response}")
    print()
    # Project metadata.
    print("📋 项目信息...")
    project_url = f"https://cloudresourcemanager.googleapis.com/v1/projects/{project_id}"
    status_code, response = make_api_request(project_url, token)
    if status_code == 200:
        print(f" ✅ 项目名称: {response.get('name', 'N/A')}")
        print(f" 📊 项目编号: {response.get('projectNumber', 'N/A')}")
        print(f" 🏷️ 项目ID: {response.get('projectId', 'N/A')}")
        print(f" 📅 创建时间: {response.get('createTime', 'N/A')}")
        print(f" 🔄 生命周期: {response.get('lifecycleState', 'N/A')}")
    else:
        print(f" ❌ 无法获取项目信息: {status_code}")
    print()
    # Enabled AI-related services.
    print("🔧 查看启用的AI相关服务...")
    services_url = f"https://serviceusage.googleapis.com/v1/projects/{project_id}/services"
    status_code, response = make_api_request(services_url, token)
    if status_code == 200:
        services = response.get('services', [])
        ai_services = [s for s in services if 'ai' in s.get('config', {}).get('name', '').lower() or 'generative' in s.get('config', {}).get('name', '').lower()]
        print(f" 📊 总服务数: {len(services)}")
        print(f" 🤖 AI相关服务: {len(ai_services)}")
        for service in ai_services[:10]:  # show the first 10 only
            name = service.get('config', {}).get('name', 'Unknown')
            state = service.get('state', 'Unknown')
            print(f"{name}: {state}")
    else:
        print(f" ❌ 无法获取服务信息: {status_code}")
    print()
    print("🎯 Memory Bank访问建议:")
    print(" 1. 在Google Cloud Console中访问:")
    print(f" https://console.cloud.google.com/vertex-ai/generative/memory-banks?project={project_id}")
    print(" 2. 或者访问Vertex AI主页:")
    print(f" https://console.cloud.google.com/vertex-ai?project={project_id}")
    print(" 3. Memory Bank功能可能在'生成式AI''实验性功能'部分")
    print()
    print("🎉 GCP API查询完成!")
    print("=" * 60)
if __name__ == "__main__":
    # Script entry point.
    main()

View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
"""
RapidAPI检查工具
从cauldron_new迁移的简化版本
"""
import requests
import time
from typing import Dict, List, Any
from config.settings import get_rapidapi_key
class RapidAPIChecker:
    """RapidAPI subscription checker.

    Probes a fixed list of common RapidAPI services and reports whether each
    responds successfully with the configured API key.
    """
    def __init__(self):
        """Load the RapidAPI key; fall back to an empty key if unavailable."""
        try:
            self.api_key = get_rapidapi_key()
        except Exception as e:
            print(f"❌ 无法获取RapidAPI密钥: {e}")
            self.api_key = ""
        # Base headers shared by every request.  The per-host header is added
        # per call in test_api instead of being written into this dict.
        self.headers = {
            'X-RapidAPI-Key': self.api_key,
            'Content-Type': 'application/json'
        }
    def test_api(self, host: str, endpoint: str, params: Dict = None, method: str = 'GET') -> Dict[str, Any]:
        """Call one RapidAPI endpoint and summarize the outcome.

        Args:
            host: RapidAPI host name (e.g. "yahoo-finance15.p.rapidapi.com").
            endpoint: Path (and optional query string) to call.
            params: Query parameters (GET) or JSON body (POST).
            method: HTTP method; 'GET' or anything else for POST.

        Returns:
            Dict with keys success, status_code, response_size,
            response_time and error (None on HTTP 200, otherwise a
            truncated error string).
        """
        # Build per-request headers rather than mutating self.headers, so the
        # host header from one call can never leak into a later request.
        headers = dict(self.headers)
        headers['X-RapidAPI-Host'] = host
        url = f"https://{host}{endpoint}"
        try:
            if method.upper() == 'GET':
                response = requests.get(url, headers=headers, params=params, timeout=8)
            else:
                response = requests.post(url, headers=headers, json=params, timeout=8)
            return {
                'success': response.status_code == 200,
                'status_code': response.status_code,
                'response_size': len(response.text),
                'response_time': response.elapsed.total_seconds(),
                'error': None if response.status_code == 200 else response.text[:200]
            }
        except Exception as e:
            # Network-level failure (DNS, timeout, ...): report, don't raise.
            return {
                'success': False,
                'status_code': None,
                'response_size': 0,
                'response_time': 0,
                'error': str(e)
            }
    def check_common_apis(self) -> Dict[str, Dict[str, Any]]:
        """Probe the built-in list of common RapidAPI services.

        Returns:
            Mapping of API display name -> test_api result dict.
        """
        print("🔍 检查RapidAPI订阅状态")
        # Services to probe.
        apis_to_check = [
            {
                'name': 'Yahoo Finance',
                'host': 'yahoo-finance15.p.rapidapi.com',
                'endpoint': '/api/yahoo/qu/quote/AAPL'
            },
            {
                'name': 'Alpha Vantage',
                'host': 'alpha-vantage.p.rapidapi.com',
                'endpoint': '/query?function=GLOBAL_QUOTE&symbol=AAPL'
            },
            {
                'name': 'Seeking Alpha',
                'host': 'seeking-alpha.p.rapidapi.com',
                'endpoint': '/symbols/get-profile?symbols=AAPL'
            }
        ]
        results = {}
        for api in apis_to_check:
            print(f" 测试 {api['name']}...")
            result = self.test_api(api['host'], api['endpoint'])
            results[api['name']] = result
            status = "✅ 可用" if result['success'] else "❌ 不可用"
            print(f" {status} - {result.get('response_time', 0):.2f}s")
            time.sleep(0.5)  # throttle so we don't hammer the gateway
        return results
def main():
    """Entry point: run the common-API availability check and print a summary."""
    api_checker = RapidAPIChecker()
    check_results = api_checker.check_common_apis()
    print("\n📊 检查结果总结:")
    ok_total = sum(1 for outcome in check_results.values() if outcome['success'])
    print(f"可用API: {ok_total}/{len(check_results)}")
if __name__ == "__main__":
    # Script entry point.
    main()