Backup before system reinstall

This commit is contained in:
llama-research
2025-09-06 07:37:16 +00:00
parent f9856c31e5
commit e78aefac88
66 changed files with 16347 additions and 1917 deletions


@@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""
通用记忆银行抽象便于插入不同后端Vertex、Cloudflare AutoRAG等
"""
from __future__ import annotations
from typing import Dict, List, Any, Optional, Protocol, runtime_checkable
@runtime_checkable
class MemoryBankProtocol(Protocol):
async def create_memory_bank(self, agent_name: str, display_name: Optional[str] = None) -> str: ...
async def add_memory(
self,
agent_name: str,
content: str,
memory_type: str = "conversation",
debate_topic: str = "",
metadata: Optional[Dict[str, Any]] = None,
) -> str: ...
async def search_memories(
self,
agent_name: str,
query: str,
memory_type: Optional[str] = None,
limit: int = 10,
) -> List[Dict[str, Any]]: ...
async def get_agent_context(self, agent_name: str, debate_topic: str) -> str: ...
async def save_debate_session(
self,
debate_topic: str,
participants: List[str],
conversation_history: List[Dict[str, str]],
outcomes: Optional[Dict[str, Any]] = None,
) -> None: ...
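
Because the protocol is decorated with @runtime_checkable, a backend can be verified structurally at runtime without inheriting from it. A minimal sketch, assuming MemoryBankProtocol from the module above is in scope; the NullMemoryBank stub is hypothetical and only illustrates the check (isinstance on a runtime-checkable protocol verifies method presence, not signatures):

from typing import Any, Dict, List, Optional

class NullMemoryBank:
    """Hypothetical no-op backend used only to illustrate structural typing."""
    async def create_memory_bank(self, agent_name: str, display_name: Optional[str] = None) -> str:
        return f"null_{agent_name}"
    async def add_memory(self, agent_name: str, content: str, memory_type: str = "conversation",
                         debate_topic: str = "", metadata: Optional[Dict[str, Any]] = None) -> str:
        return "noop"
    async def search_memories(self, agent_name: str, query: str,
                              memory_type: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
        return []
    async def get_agent_context(self, agent_name: str, debate_topic: str) -> str:
        return ""
    async def save_debate_session(self, debate_topic: str, participants: List[str],
                                  conversation_history: List[Dict[str, str]],
                                  outcomes: Optional[Dict[str, Any]] = None) -> None:
        return None

# isinstance() on a runtime_checkable Protocol only checks that the methods exist,
# not their signatures or return types.
assert isinstance(NullMemoryBank(), MemoryBankProtocol)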


@@ -0,0 +1,454 @@
#!/usr/bin/env python3
"""
Cloudflare AutoRAG Vectorize 记忆银行实现
为稷下学宫AI辩论系统提供Cloudflare后端的记忆功能
"""
import os
import json
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from datetime import datetime
import aiohttp
from config.settings import get_cloudflare_config
@dataclass
class MemoryEntry:
"""记忆条目数据结构"""
id: str
content: str
metadata: Dict[str, Any]
timestamp: str # ISO format string
agent_name: str
debate_topic: str
memory_type: str # "conversation", "preference", "knowledge", "strategy"
class CloudflareMemoryBank:
"""
Cloudflare AutoRAG Vectorize 记忆银行管理器
利用Cloudflare Vectorize索引和Workers AI进行向量检索增强生成
"""
def __init__(self):
"""初始化Cloudflare Memory Bank"""
self.config = get_cloudflare_config()
self.account_id = self.config['account_id']
self.api_token = self.config['api_token']
self.vectorize_index = self.config['vectorize_index']
self.embed_model = self.config['embed_model']
self.autorag_domain = self.config['autorag_domain']
        # Build the base Cloudflare API URL
self.base_url = f"https://api.cloudflare.com/client/v4/accounts/{self.account_id}"
self.headers = {
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json"
}
        # Mapping from Eight Immortals (八仙) agent keys to their Chinese display names
self.baxian_agents = {
"tieguaili": "铁拐李",
"hanzhongli": "汉钟离",
"zhangguolao": "张果老",
"lancaihe": "蓝采和",
"hexiangu": "何仙姑",
"lvdongbin": "吕洞宾",
"hanxiangzi": "韩湘子",
"caoguojiu": "曹国舅"
}
async def _get_session(self) -> aiohttp.ClientSession:
"""获取aiohttp会话"""
return aiohttp.ClientSession()
    async def create_memory_bank(self, agent_name: str, display_name: Optional[str] = None) -> str:
        """
        Prepare a memory space for the given agent (implemented in Cloudflare via
        namespacing / metadata).
        Args:
            agent_name: agent key (e.g. "tieguaili")
            display_name: display name (e.g. "铁拐李的记忆银行")
        Returns:
            Memory space identifier (agent_name is used as the identifier here)
        """
        # Cloudflare Vectorize uses a single shared index and distinguishes each agent's
        # memories via metadata, so nothing needs to be created here -- just return an identifier.
if not display_name:
display_name = self.baxian_agents.get(agent_name, agent_name)
print(f"✅ 为 {display_name} 准备Cloudflare记忆空间")
return f"cf_memory_{agent_name}"
    async def add_memory(self,
                         agent_name: str,
                         content: str,
                         memory_type: str = "conversation",
                         debate_topic: str = "",
                         metadata: Optional[Dict[str, Any]] = None) -> str:
        """
        Add a memory to the Cloudflare Vectorize index.
        Args:
            agent_name: agent key
            content: memory content
            memory_type: memory type ("conversation", "preference", "knowledge", "strategy")
            debate_topic: debate topic
            metadata: extra metadata
        Returns:
            Memory ID
        """
        if metadata is None:
            metadata = {}
        # Generate a memory ID
        memory_id = f"mem_{agent_name}_{int(datetime.now().timestamp() * 1000000)}"
        # Build the memory entry
memory_entry = MemoryEntry(
id=memory_id,
content=content,
            metadata={
                **metadata,
                "content": content,  # stored in metadata so vector queries can return the text
                "agent_name": agent_name,
                "chinese_name": self.baxian_agents.get(agent_name, agent_name),
                "memory_type": memory_type,
                "debate_topic": debate_topic,
                "system": "jixia_academy"
            },
timestamp=datetime.now().isoformat(),
agent_name=agent_name,
debate_topic=debate_topic,
memory_type=memory_type
)
        # Build the Vectorize vector record for upsert
        memory_data = {
            "id": memory_id,
            "values": [],  # vector values are filled in after embedding
"metadata": memory_entry.metadata
}
try:
            # 1. Generate the embedding with Workers AI
            embedding = await self._generate_embedding(content)
            memory_data["values"] = embedding
            # 2. Upsert the memory into the Vectorize index
async with await self._get_session() as session:
url = f"{self.base_url}/vectorize/indexes/{self.vectorize_index}/upsert"
payload = {
"vectors": [memory_data]
}
async with session.post(url, headers=self.headers, json=payload) as response:
if response.status == 200:
result = await response.json()
print(f"✅ 为 {self.baxian_agents.get(agent_name)} 添加记忆: {memory_type}")
return memory_id
else:
error_text = await response.text()
raise Exception(f"Failed to upsert memory: {response.status} - {error_text}")
except Exception as e:
print(f"❌ 添加记忆失败: {e}")
raise
async def _generate_embedding(self, text: str) -> List[float]:
"""
使用Cloudflare Workers AI生成文本嵌入
Args:
text: 要嵌入的文本
Returns:
嵌入向量
"""
async with await self._get_session() as session:
url = f"{self.base_url}/ai/run/{self.embed_model}"
payload = {
"text": [text] # Workers AI embeddings API expects a list of texts
}
async with session.post(url, headers=self.headers, json=payload) as response:
                if response.status == 200:
                    result = await response.json()
                    # Extract the embedding vector. Workers AI typically returns
                    # {"result": {"data": [[...floats...]]}}; some models wrap each item as
                    # {"embedding": [...]}, so handle both shapes.
                    data = result.get("result", {}).get("data")
                    if data:
                        first = data[0]
                        if isinstance(first, dict) and "embedding" in first:
                            return first["embedding"]
                        return first
                    raise Exception(f"Unexpected embedding response format: {result}")
else:
error_text = await response.text()
raise Exception(f"Failed to generate embedding: {response.status} - {error_text}")
    async def search_memories(self,
                              agent_name: str,
                              query: str,
                              memory_type: Optional[str] = None,
                              limit: int = 10) -> List[Dict[str, Any]]:
        """
        Search an agent's relevant memories using vector similarity.
        Args:
            agent_name: agent key
            query: search query
            memory_type: optional memory-type filter
            limit: maximum number of results
        Returns:
            List of relevant memories
        """
        try:
            # 1. Generate an embedding for the query
            query_embedding = await self._generate_embedding(query)
            # 2. Build the metadata filter
            filters = {
                "agent_name": agent_name
            }
            if memory_type:
                filters["memory_type"] = memory_type
            # 3. Run the vector search
async with await self._get_session() as session:
url = f"{self.base_url}/vectorize/indexes/{self.vectorize_index}/query"
payload = {
"vector": query_embedding,
"topK": limit,
"filter": filters,
"returnMetadata": True
}
async with session.post(url, headers=self.headers, json=payload) as response:
if response.status == 200:
result = await response.json()
matches = result.get("result", {}).get("matches", [])
                        # Format the returned matches
memories = []
for match in matches:
memory_data = {
"content": match["metadata"].get("content", ""),
"metadata": match["metadata"],
"relevance_score": match["score"]
}
memories.append(memory_data)
return memories
else:
error_text = await response.text()
raise Exception(f"Failed to search memories: {response.status} - {error_text}")
except Exception as e:
print(f"❌ 搜索记忆失败: {e}")
return []
async def get_agent_context(self, agent_name: str, debate_topic: str) -> str:
"""
获取智能体在特定辩论主题下的上下文记忆
Args:
agent_name: 智能体名称
debate_topic: 辩论主题
Returns:
格式化的上下文字符串
"""
# 搜索相关记忆
conversation_memories = await self.search_memories(
agent_name, debate_topic, "conversation", limit=5
)
preference_memories = await self.search_memories(
agent_name, debate_topic, "preference", limit=3
)
strategy_memories = await self.search_memories(
agent_name, debate_topic, "strategy", limit=3
)
        # Build the context
context_parts = []
if conversation_memories:
context_parts.append("## 历史对话记忆")
for mem in conversation_memories:
context_parts.append(f"- {mem['content']}")
if preference_memories:
context_parts.append("\n## 偏好记忆")
for mem in preference_memories:
context_parts.append(f"- {mem['content']}")
if strategy_memories:
context_parts.append("\n## 策略记忆")
for mem in strategy_memories:
context_parts.append(f"- {mem['content']}")
chinese_name = self.baxian_agents.get(agent_name, agent_name)
if context_parts:
return f"# {chinese_name}的记忆上下文\n\n" + "\n".join(context_parts)
else:
return f"# {chinese_name}的记忆上下文\n\n暂无相关记忆。"
    async def save_debate_session(self,
                                  debate_topic: str,
                                  participants: List[str],
                                  conversation_history: List[Dict[str, str]],
                                  outcomes: Optional[Dict[str, Any]] = None) -> None:
        """
        Save a complete debate session to each participant's memory bank.
        Args:
            debate_topic: debate topic
            participants: list of participants
            conversation_history: conversation history
            outcomes: debate outcomes and insights
        """
        for agent_name in participants:
            if agent_name not in self.baxian_agents:
                continue
            # Save the conversation history
conversation_summary = self._summarize_conversation(
conversation_history, agent_name
)
await self.add_memory(
agent_name=agent_name,
content=conversation_summary,
memory_type="conversation",
debate_topic=debate_topic,
metadata={
"participants": participants,
"session_length": len(conversation_history)
}
)
            # Save strategy insights
if outcomes:
strategy_insight = self._extract_strategy_insight(
outcomes, agent_name
)
if strategy_insight:
await self.add_memory(
agent_name=agent_name,
content=strategy_insight,
memory_type="strategy",
debate_topic=debate_topic,
metadata={"session_outcome": outcomes}
)
def _summarize_conversation(self,
conversation_history: List[Dict[str, str]],
agent_name: str) -> str:
"""
为特定智能体总结对话历史
Args:
conversation_history: 对话历史
agent_name: 智能体名称
Returns:
对话总结
"""
agent_messages = [
msg for msg in conversation_history
if msg.get("agent") == agent_name
]
if not agent_messages:
return "本次辩论中未发言"
chinese_name = self.baxian_agents.get(agent_name, agent_name)
summary = f"{chinese_name}在本次辩论中的主要观点:\n"
        for i, msg in enumerate(agent_messages[:3], 1):  # keep only the first 3 main points
summary += f"{i}. {msg.get('content', '')[:100]}...\n"
return summary
def _extract_strategy_insight(self,
outcomes: Dict[str, Any],
agent_name: str) -> Optional[str]:
"""
从辩论结果中提取策略洞察
Args:
outcomes: 辩论结果
agent_name: 智能体名称
Returns:
策略洞察或None
"""
# 这里可以根据实际的outcomes结构来提取洞察
# 暂时返回一个简单的示例
chinese_name = self.baxian_agents.get(agent_name, agent_name)
if "winner" in outcomes and outcomes["winner"] == agent_name:
return f"{chinese_name}在本次辩论中获胜,其论证策略值得保持。"
elif "insights" in outcomes and agent_name in outcomes["insights"]:
return outcomes["insights"][agent_name]
return None
# Convenience functions
async def initialize_baxian_memory_banks() -> CloudflareMemoryBank:
    """
    Initialize the Cloudflare memory spaces for all Eight Immortals agents.
    Returns:
        Configured CloudflareMemoryBank instance
    """
memory_bank = CloudflareMemoryBank()
print("🏛️ 正在为稷下学宫八仙创建Cloudflare记忆空间...")
for agent_key, chinese_name in memory_bank.baxian_agents.items():
try:
await memory_bank.create_memory_bank(agent_key)
except Exception as e:
print(f"⚠️ 创建 {chinese_name} 记忆空间时出错: {e}")
print("✅ 八仙Cloudflare记忆空间初始化完成")
return memory_bank
if __name__ == "__main__":
import asyncio
async def test_memory_bank():
"""测试Cloudflare Memory Bank功能"""
try:
# 创建Memory Bank实例
memory_bank = CloudflareMemoryBank()
# 测试创建记忆空间
await memory_bank.create_memory_bank("tieguaili")
# 测试添加记忆
await memory_bank.add_memory(
agent_name="tieguaili",
content="在讨论NVIDIA股票时我倾向于逆向思维关注潜在风险。",
memory_type="preference",
debate_topic="NVIDIA投资分析"
)
            # Test searching memories
results = await memory_bank.search_memories(
agent_name="tieguaili",
query="NVIDIA",
limit=5
)
print(f"搜索结果: {len(results)} 条记忆")
for result in results:
print(f"- {result['content']}")
except Exception as e:
print(f"❌ 测试失败: {e}")
    # Run the test
asyncio.run(test_memory_bank())
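
As a rough sketch of how the class above might be driven from a debate loop: pull each agent's memory context, generate a reply, then persist the round so it becomes retrievable memory for future debates. The run_debate_round helper, the placeholder reply, and the commented-out invocation are illustrative assumptions, not part of this commit:

import asyncio

async def run_debate_round(topic: str, participants: list[str]) -> None:
    # Hypothetical driver showing the intended call sequence; not part of the file above.
    bank = CloudflareMemoryBank()
    history: list[dict[str, str]] = []
    for agent in participants:
        # Pull the agent's memory context and feed it to whatever generates the reply.
        context = await bank.get_agent_context(agent, topic)
        reply = f"{context[:60]}... (placeholder reply)"  # stand-in for a real model call
        history.append({"agent": agent, "content": reply})
    # Persist the round so it becomes retrievable memory for future debates.
    await bank.save_debate_session(topic, participants, history)

# Requires valid Cloudflare credentials in config:
# asyncio.run(run_debate_round("NVIDIA投资分析", ["tieguaili", "lvdongbin"]))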


@@ -0,0 +1,23 @@
#!/usr/bin/env python3
"""
记忆银行工厂:根据配置创建不同后端实现
"""
from __future__ import annotations
import os
from typing import Optional
from jixia_academy.core.memory_bank.interface import MemoryBankInterface
def get_memory_backend(prefer: Optional[str] = None) -> MemoryBankInterface:
"""
获取记忆银行后端
默认使用内存实现
"""
from jixia_academy.core.memory_bank.memory_impl import MemoryBankImpl
# 使用内存实现
memory_bank = MemoryBankImpl(storage_path="jixia_academy/data/memory")
print("🧠 使用内存记忆银行后端")
return memory_bank
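
A possible calling pattern for the factory, assuming MemoryBankImpl implements the MemoryBankInterface defined in the next file and that get_memory_backend is in scope (the debate id and message below are made up for illustration):

import asyncio

async def demo_factory_usage() -> None:
    # Assumes get_memory_backend() from the factory above is importable/in scope.
    backend = get_memory_backend()
    await backend.initialize()
    try:
        await backend.add_debate_message("debate-001", "tieguaili", "先说风险。", round_num=1)
        history = await backend.get_debate_history("debate-001")
        print(f"{len(history)} message(s) stored for debate-001")
    finally:
        await backend.close()

# asyncio.run(demo_factory_usage())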


@@ -0,0 +1,106 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
记忆银行接口定义
Memory Bank Interface Definition
"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
from datetime import datetime
class MemoryBankInterface(ABC):
"""记忆银行接口"""
@abstractmethod
async def initialize(self):
"""初始化记忆银行"""
pass
@abstractmethod
async def close(self):
"""关闭记忆银行"""
pass
@abstractmethod
async def add_debate_message(
self,
debate_id: str,
speaker: str,
message: str,
round_num: int
):
"""添加辩论消息"""
pass
@abstractmethod
async def get_debate_history(
self,
debate_id: str
) -> List[Dict[str, Any]]:
"""获取辩论历史"""
pass
@abstractmethod
async def save_debate_result(
self,
debate_id: str,
summary: Dict[str, Any],
participants: List[str]
):
"""保存辩论结果"""
pass
@abstractmethod
async def get_debate_result(
self,
debate_id: str
) -> Optional[Dict[str, Any]]:
"""获取辩论结果"""
pass
@abstractmethod
async def list_debates(
self,
limit: int = 10
) -> List[Dict[str, Any]]:
"""列出辩论"""
pass
@abstractmethod
async def add_market_data(
self,
symbol: str,
data: Dict[str, Any]
):
"""添加市场数据"""
pass
@abstractmethod
async def get_market_data(
self,
symbol: str,
start_date: datetime,
end_date: datetime
) -> List[Dict[str, Any]]:
"""获取市场数据"""
pass
@abstractmethod
async def store_analysis(
self,
analysis_id: str,
analysis_type: str,
content: Dict[str, Any]
):
"""存储分析结果"""
pass
@abstractmethod
async def get_analysis(
self,
analysis_id: str
) -> Optional[Dict[str, Any]]:
"""获取分析结果"""
pass
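
Since every method above is declared with @abstractmethod, Python refuses to instantiate any subclass that leaves one unimplemented. A quick illustrative check (the PartialBank class is hypothetical and exists only to demonstrate the failure mode):

class PartialBank(MemoryBankInterface):
    async def initialize(self):
        pass
    # The remaining abstract methods are intentionally left unimplemented.

try:
    PartialBank()  # type: ignore[abstract]
except TypeError as exc:
    # Python raises TypeError when abstract methods are still missing.
    print(f"Expected failure: {exc}")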


@@ -0,0 +1,463 @@
#!/usr/bin/env python3
"""
Vertex AI Memory Bank 集成模块
为稷下学宫AI辩论系统提供记忆银行功能
"""
import os
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from datetime import datetime
import json
try:
from google.cloud import aiplatform
    # Memory Bank features may still be in preview; use basic functionality for now
VERTEX_AI_AVAILABLE = True
except ImportError:
VERTEX_AI_AVAILABLE = False
print("⚠️ Google Cloud AI Platform 未安装Memory Bank功能不可用")
print("安装命令: pip install google-cloud-aiplatform")
from config.settings import get_google_genai_config
@dataclass
class MemoryEntry:
"""记忆条目数据结构"""
content: str
metadata: Dict[str, Any]
timestamp: datetime
agent_name: str
debate_topic: str
memory_type: str # "conversation", "preference", "knowledge", "strategy"
class VertexMemoryBank:
"""
Vertex AI Memory Bank 管理器
为八仙辩论系统提供智能记忆功能
"""
def __init__(self, project_id: str, location: str = "us-central1"):
"""
初始化Memory Bank
Args:
project_id: Google Cloud项目ID
location: 部署区域
"""
if not VERTEX_AI_AVAILABLE:
print("⚠️ Google Cloud AI Platform 未安装,使用本地模拟模式")
# 不抛出异常,允许使用本地模拟模式
self.project_id = project_id
self.location = location
self.memory_banks = {} # 存储不同智能体的记忆银行
self.local_memories = {} # 本地记忆存储 (临时方案)
# 初始化AI Platform
try:
aiplatform.init(project=project_id, location=location)
print(f"✅ Vertex AI 初始化成功: {project_id} @ {location}")
except Exception as e:
print(f"⚠️ Vertex AI 初始化失败,使用本地模拟模式: {e}")
        # Mapping from Eight Immortals (八仙) agent keys to their Chinese display names
self.baxian_agents = {
"tieguaili": "铁拐李",
"hanzhongli": "汉钟离",
"zhangguolao": "张果老",
"lancaihe": "蓝采和",
"hexiangu": "何仙姑",
"lvdongbin": "吕洞宾",
"hanxiangzi": "韩湘子",
"caoguojiu": "曹国舅"
}
@classmethod
def from_config(cls) -> 'VertexMemoryBank':
"""
从配置创建Memory Bank实例
Returns:
VertexMemoryBank实例
"""
config = get_google_genai_config()
project_id = config.get('project_id')
location = config.get('location', 'us-central1')
if not project_id:
raise ValueError("Google Cloud Project ID 未配置,请设置 GOOGLE_CLOUD_PROJECT_ID")
return cls(project_id=project_id, location=location)
    async def create_memory_bank(self, agent_name: str, display_name: Optional[str] = None) -> str:
        """
        Create a memory bank for the given agent.
        Args:
            agent_name: agent key (e.g. "tieguaili")
            display_name: display name (e.g. "铁拐李的记忆银行")
        Returns:
            Memory bank ID
        """
        if not display_name:
            chinese_name = self.baxian_agents.get(agent_name, agent_name)
            display_name = f"{chinese_name}的记忆银行"
        try:
            # Simulate the memory bank with local storage (temporary workaround)
            memory_bank_id = f"memory_bank_{agent_name}_{self.project_id}"
            # Initialize the local memory store
if agent_name not in self.local_memories:
self.local_memories[agent_name] = []
self.memory_banks[agent_name] = memory_bank_id
print(f"✅ 为 {display_name} 创建记忆银行: {memory_bank_id}")
return memory_bank_id
except Exception as e:
print(f"❌ 创建记忆银行失败: {e}")
raise
    async def add_memory(self,
                         agent_name: str,
                         content: str,
                         memory_type: str = "conversation",
                         debate_topic: str = "",
                         metadata: Optional[Dict[str, Any]] = None) -> str:
        """
        Add a memory to the given agent's memory bank.
        Args:
            agent_name: agent key
            content: memory content
            memory_type: memory type ("conversation", "preference", "knowledge", "strategy")
            debate_topic: debate topic
            metadata: extra metadata
        Returns:
            Memory ID
        """
        if agent_name not in self.memory_banks:
            await self.create_memory_bank(agent_name)
        if metadata is None:
            metadata = {}
        # Build the memory entry
memory_entry = MemoryEntry(
content=content,
metadata={
**metadata,
"agent_name": agent_name,
"chinese_name": self.baxian_agents.get(agent_name, agent_name),
"memory_type": memory_type,
"debate_topic": debate_topic,
"system": "jixia_academy"
},
timestamp=datetime.now(),
agent_name=agent_name,
debate_topic=debate_topic,
memory_type=memory_type
)
try:
            # Add the memory to local storage (temporary workaround)
            memory_id = f"memory_{agent_name}_{len(self.local_memories[agent_name])}"
            # Append to the local store
memory_data = {
"id": memory_id,
"content": content,
"metadata": memory_entry.metadata,
"timestamp": memory_entry.timestamp.isoformat(),
"memory_type": memory_type,
"debate_topic": debate_topic
}
self.local_memories[agent_name].append(memory_data)
print(f"✅ 为 {self.baxian_agents.get(agent_name)} 添加记忆: {memory_type}")
return memory_id
except Exception as e:
print(f"❌ 添加记忆失败: {e}")
raise
    async def search_memories(self,
                              agent_name: str,
                              query: str,
                              memory_type: Optional[str] = None,
                              limit: int = 10) -> List[Dict[str, Any]]:
        """
        Search an agent's relevant memories.
        Args:
            agent_name: agent key
            query: search query
            memory_type: optional memory-type filter
            limit: maximum number of results
        Returns:
            List of relevant memories
        """
        if agent_name not in self.memory_banks:
            return []
        try:
            # Search local storage for memories (temporary workaround)
if agent_name not in self.local_memories:
return []
memories = self.local_memories[agent_name]
results = []
            # Simple text-match search
            query_lower = query.lower()
            for memory in memories:
                # Apply the memory-type filter
                if memory_type and memory.get("memory_type") != memory_type:
                    continue
                # Check for a content match
                content_lower = memory["content"].lower()
                debate_topic_lower = memory.get("debate_topic", "").lower()
                # Search both the content and the debate topic
                if query_lower in content_lower or query_lower in debate_topic_lower:
                    # Compute a simple relevance score
content_matches = content_lower.count(query_lower)
topic_matches = debate_topic_lower.count(query_lower)
total_words = len(content_lower.split()) + len(debate_topic_lower.split())
relevance_score = (content_matches + topic_matches) / max(total_words, 1)
results.append({
"content": memory["content"],
"metadata": memory["metadata"],
"relevance_score": relevance_score
})
            # Sort by relevance and limit the number of results
results.sort(key=lambda x: x["relevance_score"], reverse=True)
return results[:limit]
except Exception as e:
print(f"❌ 搜索记忆失败: {e}")
return []
async def get_agent_context(self, agent_name: str, debate_topic: str) -> str:
"""
获取智能体在特定辩论主题下的上下文记忆
Args:
agent_name: 智能体名称
debate_topic: 辩论主题
Returns:
格式化的上下文字符串
"""
# 搜索相关记忆
conversation_memories = await self.search_memories(
agent_name, debate_topic, "conversation", limit=5
)
preference_memories = await self.search_memories(
agent_name, debate_topic, "preference", limit=3
)
strategy_memories = await self.search_memories(
agent_name, debate_topic, "strategy", limit=3
)
        # Build the context
context_parts = []
if conversation_memories:
context_parts.append("## 历史对话记忆")
for mem in conversation_memories:
context_parts.append(f"- {mem['content']}")
if preference_memories:
context_parts.append("\n## 偏好记忆")
for mem in preference_memories:
context_parts.append(f"- {mem['content']}")
if strategy_memories:
context_parts.append("\n## 策略记忆")
for mem in strategy_memories:
context_parts.append(f"- {mem['content']}")
chinese_name = self.baxian_agents.get(agent_name, agent_name)
if context_parts:
return f"# {chinese_name}的记忆上下文\n\n" + "\n".join(context_parts)
else:
return f"# {chinese_name}的记忆上下文\n\n暂无相关记忆。"
    async def save_debate_session(self,
                                  debate_topic: str,
                                  participants: List[str],
                                  conversation_history: List[Dict[str, str]],
                                  outcomes: Optional[Dict[str, Any]] = None) -> None:
        """
        Save a complete debate session to each participant's memory bank.
        Args:
            debate_topic: debate topic
            participants: list of participants
            conversation_history: conversation history
            outcomes: debate outcomes and insights
        """
        for agent_name in participants:
            if agent_name not in self.baxian_agents:
                continue
            # Save the conversation history
conversation_summary = self._summarize_conversation(
conversation_history, agent_name
)
await self.add_memory(
agent_name=agent_name,
content=conversation_summary,
memory_type="conversation",
debate_topic=debate_topic,
metadata={
"participants": participants,
"session_length": len(conversation_history)
}
)
            # Save strategy insights
if outcomes:
strategy_insight = self._extract_strategy_insight(
outcomes, agent_name
)
if strategy_insight:
await self.add_memory(
agent_name=agent_name,
content=strategy_insight,
memory_type="strategy",
debate_topic=debate_topic,
metadata={"session_outcome": outcomes}
)
def _summarize_conversation(self,
conversation_history: List[Dict[str, str]],
agent_name: str) -> str:
"""
为特定智能体总结对话历史
Args:
conversation_history: 对话历史
agent_name: 智能体名称
Returns:
对话总结
"""
agent_messages = [
msg for msg in conversation_history
if msg.get("agent") == agent_name
]
if not agent_messages:
return "本次辩论中未发言"
chinese_name = self.baxian_agents.get(agent_name, agent_name)
summary = f"{chinese_name}在本次辩论中的主要观点:\n"
        for i, msg in enumerate(agent_messages[:3], 1):  # keep only the first 3 main points
summary += f"{i}. {msg.get('content', '')[:100]}...\n"
return summary
def _extract_strategy_insight(self,
outcomes: Dict[str, Any],
agent_name: str) -> Optional[str]:
"""
从辩论结果中提取策略洞察
Args:
outcomes: 辩论结果
agent_name: 智能体名称
Returns:
策略洞察或None
"""
# 这里可以根据实际的outcomes结构来提取洞察
# 暂时返回一个简单的示例
chinese_name = self.baxian_agents.get(agent_name, agent_name)
if "winner" in outcomes and outcomes["winner"] == agent_name:
return f"{chinese_name}在本次辩论中获胜,其论证策略值得保持。"
elif "insights" in outcomes and agent_name in outcomes["insights"]:
return outcomes["insights"][agent_name]
return None
# Convenience functions
async def initialize_baxian_memory_banks(project_id: str, location: str = "us-central1") -> VertexMemoryBank:
    """
    Initialize the memory banks for all Eight Immortals agents.
    Args:
        project_id: Google Cloud project ID
        location: deployment region
    Returns:
        Configured VertexMemoryBank instance
    """
memory_bank = VertexMemoryBank(project_id, location)
print("🏛️ 正在为稷下学宫八仙创建记忆银行...")
for agent_key, chinese_name in memory_bank.baxian_agents.items():
try:
await memory_bank.create_memory_bank(agent_key)
except Exception as e:
print(f"⚠️ 创建 {chinese_name} 记忆银行时出错: {e}")
print("✅ 八仙记忆银行初始化完成")
return memory_bank
if __name__ == "__main__":
import asyncio
async def test_memory_bank():
"""测试Memory Bank功能"""
try:
# 从配置创建Memory Bank
memory_bank = VertexMemoryBank.from_config()
# 测试创建记忆银行
await memory_bank.create_memory_bank("tieguaili")
# 测试添加记忆
await memory_bank.add_memory(
agent_name="tieguaili",
content="在讨论NVIDIA股票时我倾向于逆向思维关注潜在风险。",
memory_type="preference",
debate_topic="NVIDIA投资分析"
)
            # Test searching memories
results = await memory_bank.search_memories(
agent_name="tieguaili",
query="NVIDIA",
limit=5
)
print(f"搜索结果: {len(results)} 条记忆")
for result in results:
print(f"- {result['content']}")
except Exception as e:
print(f"❌ 测试失败: {e}")
    # Run the test
asyncio.run(test_memory_bank())
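
CloudflareMemoryBank and VertexMemoryBank expose the same method surface described by MemoryBankProtocol in the first file, so calling code can be written once against the protocol and handed either backend. A sketch under that assumption; the MEMORY_BACKEND variable and the cross-module imports are illustrative and not defined anywhere in this commit:

import os

def pick_backend():
    # Hypothetical selector; MEMORY_BACKEND is not used elsewhere in this commit,
    # and importing both backend classes here is assumed.
    if os.getenv("MEMORY_BACKEND") == "cloudflare":
        return CloudflareMemoryBank()
    return VertexMemoryBank.from_config()

async def remember_preference(bank, agent: str, note: str, topic: str) -> str:
    # Works with either backend because both expose the MemoryBankProtocol surface.
    return await bank.add_memory(agent_name=agent, content=note,
                                 memory_type="preference", debate_topic=topic)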