refactor(project): 重构项目文档并优化代码结构

- 移除旧的文档结构和内容,清理 root 目录下的 markdown 文件
- 删除 GitHub Pages 部署配置和相关文件
- 移除 .env.example 文件,使用 Doppler 进行环境变量管理
- 更新 README.md,增加对 OpenBB 数据的支持
- 重构 streamlit_app.py,移除 Swarm 模式相关代码
- 更新 Doppler 配置管理模块,增加对 .env 文件的支持
- 删除 Memory Bank 实验和测试脚本
- 清理内部文档和开发计划
This commit is contained in:
ben
2025-08-18 16:56:04 +00:00
parent c4e8cfefc7
commit 51576ebb6f
87 changed files with 13056 additions and 1959 deletions

View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
八仙分层辩论系统:强模型分解 + 小模型辩论
架构:
1. 强模型如GPT-4进行问题分解和观点提炼
2. 小模型如Gemini Flash基于分解结果进行辩论
"""
import asyncio
import json
import time
from typing import Dict, List, Any
import aiohttp
import logging
# Logging configuration: timestamped INFO-level output for the whole run.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class BreakdownDebateSystem:
    """Layered debate system for the Eight Immortals personas.

    Pipeline:
      1. A strong model decomposes the debate topic into structured material
         (core issues, arguments for each side, historical/future angles).
      2. A small, cheap model role-plays the immortals and runs four
         "diagonal" three-round debates based on that material.
    """

    def __init__(self):
        # API configuration (local LiteLLM-style gateway).
        self.api_base = "http://localhost:4000"
        self.api_key = "sk-1234"
        # Model configuration: strong model for decomposition, fast model
        # for the many debate turns.
        self.strong_model = "fireworks_ai/accounts/fireworks/models/deepseek-v3-0324"
        self.debate_model = "gemini/gemini-2.5-flash"
        # Debate topic.
        self.topic = "工作量证明vs无限制爬虫从李时珍采药到AI数据获取的激励机制变革"
        # Eight Immortals role definitions (traits and fixed stances).
        self.immortals = {
            "吕洞宾": {"性别": "", "特征": "文雅学者,理性分析", "立场": "支持工作量证明机制"},
            "何仙姑": {"性别": "", "特征": "温和智慧,注重平衡", "立场": "支持无限制数据获取"},
            "张果老": {"年龄": "", "特征": "经验丰富,传统智慧", "立场": "支持传统激励机制"},
            "韩湘子": {"年龄": "", "特征": "创新思维,前瞻视野", "立场": "支持AI时代新机制"},
            "汉钟离": {"地位": "", "特征": "资源丰富,商业思维", "立场": "支持市场化激励"},
            "蓝采和": {"地位": "", "特征": "平民视角,公平关注", "立场": "支持开放共享"},
            "曹国舅": {"出身": "", "特征": "权威地位,规则意识", "立场": "支持制度化管理"},
            "铁拐李": {"出身": "", "特征": "草根智慧,实用主义", "立场": "支持去中心化"}
        }
        # Diagonal debate pairings — each pair embodies one social opposition.
        self.debate_pairs = [
            ("吕洞宾", "何仙姑"),  # male vs female diagonal
            ("张果老", "韩湘子"),  # old vs young diagonal
            ("汉钟离", "蓝采和"),  # rich vs poor diagonal
            ("曹国舅", "铁拐李")   # noble vs commoner diagonal
        ]

    @staticmethod
    def _extract_json(text: str):
        """Best-effort extraction of a JSON object from an LLM reply.

        LLMs frequently wrap JSON in markdown code fences or prose; try the
        raw text first, then fall back to the outermost {...} substring.
        Returns the parsed object, or None when no valid JSON is found.
        """
        try:
            return json.loads(text)
        except (json.JSONDecodeError, TypeError):
            pass
        if not isinstance(text, str):
            return None
        start = text.find("{")
        end = text.rfind("}")
        if start != -1 and end > start:
            try:
                return json.loads(text[start:end + 1])
            except json.JSONDecodeError:
                pass
        return None

    async def call_api(self, model: str, messages: List[Dict], max_tokens: int = 1000) -> str:
        """POST one chat-completion request and return the reply text.

        On HTTP error or connection failure the problem is logged and a
        plain error string is returned — callers treat the reply as display
        text either way, so this method never raises.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        data = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": 0.7
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(f"{self.api_base}/chat/completions",
                                        headers=headers, json=data) as response:
                    if response.status == 200:
                        result = await response.json()
                        return result['choices'][0]['message']['content']
                    error_text = await response.text()
                    logger.error(f"API调用失败: {response.status} - {error_text}")
                    return f"API调用失败: {response.status}"
        except Exception as e:
            logger.error(f"API调用异常: {str(e)}")
            return f"API调用异常: {str(e)}"

    async def breakdown_topic(self) -> Dict[str, Any]:
        """Ask the strong model to decompose the topic into debate material.

        Returns a dict with keys core_issues / pro_pow_arguments /
        pro_unlimited_arguments / historical_analysis / future_trends.
        Falls back to a built-in default breakdown when the model reply
        contains no parseable JSON.
        """
        logger.info("🧠 开始使用强模型分解辩论主题...")
        breakdown_prompt = f"""
你是一个专业的辩论分析师。请对以下主题进行深度分解:
主题:{self.topic}
请提供:
1. 核心争议点3-5个
2. 支持工作量证明机制的关键论据3个
3. 支持无限制爬虫/数据获取的关键论据3个
4. 历史对比分析要点
5. 未来发展趋势预测
请以JSON格式返回结构如下
{{
    "core_issues": ["争议点1", "争议点2", ...],
    "pro_pow_arguments": ["论据1", "论据2", "论据3"],
    "pro_unlimited_arguments": ["论据1", "论据2", "论据3"],
    "historical_analysis": ["要点1", "要点2", ...],
    "future_trends": ["趋势1", "趋势2", ...]
}}
"""
        messages = [
            {"role": "system", "content": "你是一个专业的辩论分析师,擅长深度分析复杂议题。"},
            {"role": "user", "content": breakdown_prompt}
        ]
        response = await self.call_api(self.strong_model, messages, max_tokens=2000)
        # Tolerate markdown-fenced or prose-wrapped JSON, not just raw JSON.
        breakdown_data = self._extract_json(response)
        if breakdown_data is not None:
            logger.info("✅ 主题分解完成")
            return breakdown_data
        logger.error("❌ 强模型返回的不是有效JSON使用默认分解")
        return {
            "core_issues": ["激励机制公平性", "创作者权益保护", "技术发展与伦理平衡"],
            "pro_pow_arguments": ["保护创作者权益", "维护内容质量", "建立可持续生态"],
            "pro_unlimited_arguments": ["促进知识传播", "加速技术发展", "降低获取成本"],
            "historical_analysis": ["从手工采药到工业化生产的变迁", "知识产权制度的演进"],
            "future_trends": ["AI与人类协作模式", "新型激励机制探索"]
        }

    async def conduct_debate(self, breakdown_data: Dict[str, Any]):
        """Run the four diagonal debates using the breakdown material.

        Each debate has three rounds: opening statements from both sides,
        mutual rebuttals, then a closing summary.
        """
        logger.info("🎭 开始八仙对角线辩论...")
        for i, (immortal1, immortal2) in enumerate(self.debate_pairs, 1):
            logger.info(f"\n{'='*60}")
            logger.info(f"第{i}场辩论:{immortal1} vs {immortal2}")
            logger.info(f"{'='*60}")
            # Persona material for each debater.
            immortal1_info = self.immortals[immortal1]
            immortal2_info = self.immortals[immortal2]
            # Round 1: opening statements.
            statement1 = await self.get_opening_statement(immortal1, immortal1_info, breakdown_data)
            logger.info(f"\n🗣️ {immortal1}的开场陈述:")
            logger.info(statement1)
            statement2 = await self.get_opening_statement(immortal2, immortal2_info, breakdown_data)
            logger.info(f"\n🗣️ {immortal2}的开场陈述:")
            logger.info(statement2)
            # Round 2: mutual rebuttals (each responds to the other's opening).
            response1 = await self.get_response(immortal1, immortal1_info, statement2, breakdown_data)
            logger.info(f"\n💬 {immortal1}的回应:")
            logger.info(response1)
            response2 = await self.get_response(immortal2, immortal2_info, statement1, breakdown_data)
            logger.info(f"\n💬 {immortal2}的回应:")
            logger.info(response2)
            # Round 3: closing summary.
            # NOTE(review): only the first speaker of each pair delivers a
            # closing summary — confirm that asymmetry is intentional.
            summary1 = await self.get_summary(immortal1, immortal1_info, [statement1, statement2, response1, response2], breakdown_data)
            logger.info(f"\n📝 {immortal1}的总结:")
            logger.info(summary1)
            await asyncio.sleep(2)  # brief pause between debates
        logger.info(f"\n{'='*60}")
        logger.info("🎉 所有四场对角线辩论已完成!")
        logger.info(f"{'='*60}")

    async def get_opening_statement(self, immortal: str, immortal_info: Dict, breakdown_data: Dict) -> str:
        """Generate the immortal's in-character opening statement (~150 chars)."""
        prompt = f"""
你是{immortal}{immortal_info['特征']}。你的立场是:{immortal_info['立场']}
基于以下分解分析,请发表你的开场陈述:
核心争议点:{', '.join(breakdown_data['core_issues'])}
支持工作量证明的论据:{', '.join(breakdown_data['pro_pow_arguments'])}
支持无限制获取的论据:{', '.join(breakdown_data['pro_unlimited_arguments'])}
历史分析要点:{', '.join(breakdown_data['historical_analysis'])}
未来趋势:{', '.join(breakdown_data['future_trends'])}
请以{immortal}的身份和特征结合你的立场发表一段150字左右的开场陈述。要体现你的个性特征和观点倾向。
"""
        messages = [
            {"role": "system", "content": f"你是{immortal},请保持角色一致性。"},
            {"role": "user", "content": prompt}
        ]
        return await self.call_api(self.debate_model, messages)

    async def get_response(self, immortal: str, immortal_info: Dict, opponent_statement: str, breakdown_data: Dict) -> str:
        """Generate the immortal's rebuttal to the opponent's statement (~100 chars)."""
        prompt = f"""
你是{immortal}{immortal_info['特征']}。你的立场是:{immortal_info['立场']}
对方刚才说:
{opponent_statement}
基于分解分析的要点:
{', '.join(breakdown_data['core_issues'])}
请以{immortal}的身份回应对方的观点约100字。要体现你的立场和特征。
"""
        messages = [
            {"role": "system", "content": f"你是{immortal},请保持角色一致性。"},
            {"role": "user", "content": prompt}
        ]
        return await self.call_api(self.debate_model, messages)

    async def get_summary(self, immortal: str, immortal_info: Dict, all_statements: List[str], breakdown_data: Dict) -> str:
        """Generate the immortal's closing summary (~120 chars).

        Note: `all_statements` is accepted for context but not interpolated
        into the prompt — the model summarizes from its stance and the
        future-trend material only.
        """
        prompt = f"""
你是{immortal}{immortal_info['特征']}。你的立场是:{immortal_info['立场']}
基于刚才的辩论内容和分解分析请发表你的总结陈词约120字。
要总结你的核心观点,并展望未来。
分析要点:{', '.join(breakdown_data['future_trends'])}
"""
        messages = [
            {"role": "system", "content": f"你是{immortal},请保持角色一致性。"},
            {"role": "user", "content": prompt}
        ]
        return await self.call_api(self.debate_model, messages)

    async def run(self):
        """Full pipeline: strong-model breakdown, then small-model debates."""
        logger.info("🚀 启动八仙分层辩论系统")
        logger.info(f"主题:{self.topic}")
        logger.info(f"强模型(分解):{self.strong_model}")
        logger.info(f"辩论模型:{self.debate_model}")
        # Stage 1: strong model decomposes the topic.
        breakdown_data = await self.breakdown_topic()
        logger.info("\n📊 分解结果:")
        for key, value in breakdown_data.items():
            logger.info(f"{key}: {value}")
        # Stage 2: small model runs the debates.
        await self.conduct_debate(breakdown_data)
        logger.info("\n🎊 分层辩论系统运行完成!")
if __name__ == "__main__":
    # Entry point: build the system and drive the full decompose-then-debate pipeline.
    asyncio.run(BreakdownDebateSystem().run())

View File

@@ -0,0 +1,250 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
八仙辩论系统 - 自定义API版本
使用自定义LiteLLM端点而不是Google ADK
"""
import asyncio
import aiohttp
import json
import os
from typing import List, Dict, Any
import time
class CustomAPIAgent:
    """Debate agent backed by a custom (LiteLLM-compatible) HTTP endpoint."""

    def __init__(self, name: str, personality: str, api_url: str, api_key: str, model: str = "fireworks_ai/accounts/fireworks/models/deepseek-v3-0324"):
        self.name = name
        self.personality = personality
        self.api_url = api_url
        self.api_key = api_key
        self.model = model

    async def generate_response(self, prompt: str, session: aiohttp.ClientSession) -> str:
        """Ask the backing model for an in-character reply.

        Never raises: every failure path (HTTP error, empty body, network
        exception) is printed and converted into a placeholder string so the
        debate loop can continue.
        """
        request_headers = {
            "Content-Type": "application/json",
            "x-litellm-api-key": self.api_key
        }
        request_body = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": f"你是{self.name}{self.personality}"},
                {"role": "user", "content": prompt}
            ],
            "max_tokens": 1000,
            "temperature": 0.8
        }
        try:
            async with session.post(
                f"{self.api_url}/v1/chat/completions",
                headers=request_headers,
                json=request_body,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:
                # Non-200: report a truncated error body and bail out.
                if response.status != 200:
                    error_text = await response.text()
                    print(f"{self.name} API错误 ({response.status}): {error_text[:200]}...")
                    return f"[{self.name}暂时无法回应API错误: {response.status}]"
                result = await response.json()
                content = result.get('choices', [{}])[0].get('message', {}).get('content', '')
                if not content:
                    print(f"{self.name} API返回空内容: {result}")
                    return f"[{self.name}暂时无法回应API返回空内容]"
                return content.strip()
        except Exception as e:
            print(f"{self.name} 生成回应时出错: {e}")
            return f"[{self.name}暂时无法回应,连接错误]"
class BaXianCustomDebateSystem:
    """Eight-Immortals debate orchestrator backed by a custom API endpoint."""

    # (name, persona) pairs; one CustomAPIAgent is built per immortal,
    # in this order.
    _PERSONAS = (
        ("吕洞宾", "八仙之首,男性代表,理性务实,善于分析问题的本质和长远影响。你代表男性视角,注重逻辑和实用性。"),
        ("何仙姑", "八仙中唯一的女性,温柔智慧,善于从情感和人文角度思考问题。你代表女性视角,注重关怀和和谐。"),
        ("张果老", "八仙中的长者,经验丰富,代表传统智慧和保守观点。你重视稳定和传承,谨慎对待变化。"),
        ("韩湘子", "八仙中的年轻人,充满活力和创新精神。你代表新生代观点,勇于尝试和改变。"),
        ("汉钟离", "八仙中的富贵者,见多识广,代表富裕阶层的观点。你注重效率和成果,善于资源配置。"),
        ("蓝采和", "八仙中的贫苦出身,朴实无华,代表普通民众的观点。你关注基层需求,重视公平正义。"),
        ("曹国舅", "八仙中的贵族,出身高贵,代表上层社会观点。你注重秩序和礼仪,维护既有体系。"),
        ("铁拐李", "八仙中的平民英雄,不拘小节,代表底层民众观点。你直言不讳,为弱势群体发声。"),
    )

    def __init__(self, api_url: str, api_key: str):
        self.api_url = api_url.rstrip('/')
        self.api_key = api_key
        # One agent per immortal, all sharing the same endpoint credentials.
        self.agents = {
            name: CustomAPIAgent(name, persona, api_url, api_key)
            for name, persona in self._PERSONAS
        }
        # Four "diagonal" pairings, each embodying one social opposition.
        self.debate_pairs = [
            ("吕洞宾", "何仙姑", "男女对立辩论"),
            ("张果老", "韩湘子", "老少对立辩论"),
            ("汉钟离", "蓝采和", "富贫对立辩论"),
            ("曹国舅", "铁拐李", "贵贱对立辩论")
        ]

    async def test_api_connection(self) -> bool:
        """Probe the /v1/models endpoint; True iff it answers HTTP 200."""
        print(f"🔍 测试API连接: {self.api_url}")
        try:
            async with aiohttp.ClientSession() as session:
                probe_headers = {"x-litellm-api-key": self.api_key}
                async with session.get(
                    f"{self.api_url}/v1/models",
                    headers=probe_headers,
                    timeout=aiohttp.ClientTimeout(total=10)
                ) as response:
                    if response.status != 200:
                        error_text = await response.text()
                        print(f"❌ API连接失败 ({response.status}): {error_text[:200]}...")
                        return False
                    models = await response.json()
                    print(f"✅ API连接成功找到 {len(models.get('data', []))} 个模型")
                    return True
        except Exception as e:
            print(f"❌ API连接测试失败: {e}")
            return False

    async def conduct_debate(self, topic: str) -> None:
        """Run all four diagonal debates on *topic*, three turns each:
        opening statement, rebuttal, closing summary."""
        print(f"\n{'='*80}")
        print(f"🎭 八仙自定义API辩论系统")
        print(f"📝 辩论主题: {topic}")
        print(f"🔗 API端点: {self.api_url}")
        print(f"{'='*80}\n")
        # Abort early when the endpoint is unreachable.
        if not await self.test_api_connection():
            print("❌ API连接失败无法进行辩论")
            return
        async with aiohttp.ClientSession() as session:
            for round_no, (name_a, name_b, debate_type) in enumerate(self.debate_pairs, 1):
                print(f"\n🎯 第{round_no}场辩论: {debate_type}")
                print(f"⚔️ {name_a} VS {name_b}")
                print(f"📋 主题: {topic}")
                print("-" * 60)
                speaker_a = self.agents[name_a]
                speaker_b = self.agents[name_b]
                # Turn 1: opening statement from the first speaker.
                opening_prompt = f"针对'{topic}'这个话题请从你的角度阐述观点。要求1)明确表达立场 2)提供具体论据 3)字数控制在200字以内"
                print(f"\n🗣️ {name_a}发言:")
                opening = await speaker_a.generate_response(opening_prompt, session)
                print(f"{opening}\n")
                # Turn 2: rebuttal from the second speaker.
                rebuttal_prompt = f"针对'{topic}'这个话题,{name_a}刚才说:'{opening}'。请从你的角度回应并阐述不同观点。要求1)回应对方观点 2)提出自己的立场 3)字数控制在200字以内"
                print(f"🗣️ {name_b}回应:")
                rebuttal = await speaker_b.generate_response(rebuttal_prompt, session)
                print(f"{rebuttal}\n")
                # Turn 3: closing summary from the first speaker.
                closing_prompt = f"针对'{topic}'这个话题的辩论,{name_b}回应说:'{rebuttal}'。请做最后总结发言。要求1)回应对方观点 2)强化自己立场 3)寻求共识或妥协 4)字数控制在150字以内"
                print(f"🗣️ {name_a}总结:")
                closing = await speaker_a.generate_response(closing_prompt, session)
                print(f"{closing}\n")
                print(f"✅ 第{round_no}场辩论结束\n")
                # Brief pause to stay under API rate limits.
                await asyncio.sleep(1)
        print(f"\n🎉 八仙辩论全部结束!")
        print(f"📊 共进行了 {len(self.debate_pairs)} 场对角线辩论")
        print(f"🎭 参与仙人: {', '.join(self.agents.keys())}")
async def main():
    """Entry point: find a working API key, then run the full debate."""
    # Endpoint configuration.
    api_url = "http://master.tailnet-68f9.ts.net:40012"
    # NOTE(review): GEMINI_API_KEY is only used as a launch gate here — the
    # requests themselves use the LiteLLM virtual key below. Confirm intent.
    gemini_key = os.getenv('GEMINI_API_KEY', '')
    if not gemini_key:
        print("❌ 错误: 未找到GEMINI_API_KEY环境变量")
        print("请设置环境变量: export GEMINI_API_KEY=your_api_key")
        return
    # Candidate LiteLLM virtual keys, tried in order.
    test_keys = [
        "sk-0jdcGHZJpX2oUJmyEs7zVA"  # LiteLLM virtual key
    ]
    print("🚀 启动八仙自定义API辩论系统...")
    # Debate topic.
    topic = "工作量证明vs无限制爬虫从李时珍采药到AI数据获取的激励机制变革"
    for candidate_key in test_keys:
        # Skip blank or obviously-truncated keys.
        if not candidate_key or candidate_key == "sk-":
            continue
        print(f"\n🔑 尝试API密钥: {candidate_key[:15]}...")
        system = BaXianCustomDebateSystem(api_url, candidate_key)
        if await system.test_api_connection():
            print(f"✅ 使用API密钥成功: {candidate_key[:15]}...")
            await system.conduct_debate(topic)
            return
        print(f"❌ API密钥失败: {candidate_key[:15]}...")
    print("\n❌ 所有API密钥都失败了")
    print("\n🔍 可能的解决方案:")
    print(" 1. 检查LiteLLM服务器是否正确配置")
    print(" 2. 确认API密钥格式")
    print(" 3. 联系服务器管理员获取正确的虚拟密钥")
    print(" 4. 检查网络连接和防火墙设置")


if __name__ == "__main__":
    asyncio.run(main())

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,36 @@
{
"吕洞宾": [
{
"message": "你觉得特斯拉股票怎么样?",
"response": "这位股友,特斯拉啊,那是柄锋利的剑,但也得看你握得稳不稳。技术面上看,波动剧烈,高风险高回报!我吕洞宾向来胆大,看好其未来潜力,但提醒一句,入场需谨慎,设好止损位,别被震下马!这波行情,或可搏一搏,但盈亏自负,切记切记!\n",
"timestamp": "2025-08-10T15:13:34.203982"
},
{
"message": "现在市场波动很大,你怎么看?",
"response": "市场波动大?正合我意!乱世出英雄,震荡出机会!这正是剑仙我辈大展身手之时!技术分析看穿迷雾,短线操作把握良机。但切记,风浪越大越要稳住船舵,设好止损,见好就收,莫贪恋!\n",
"timestamp": "2025-08-10T15:13:35.458763"
},
{
"message": "你之前不是看好特斯拉吗?现在还是这个观点吗?",
"response": "这位股友,宝剑锋从磨砺出,梅花香自苦寒来!特斯拉虽遇风浪,但我仍看好它的长远潜力。短期震荡乃兵家常事,我会密切关注技术指标,伺机而动。剑仙的眼光,可不是轻易改变的!当然,投资需谨慎,切莫盲从,一切盈亏还需自负!\n",
"timestamp": "2025-08-10T15:13:37.048007"
}
],
"何仙姑": [
{
"message": "特斯拉股票风险大吗?",
"response": "这位朋友,特斯拉股票确实存在一定风险。电动车市场竞争激烈,技术迭代迅速,政策变化也可能带来影响。此外,马斯克个人的言行也可能对股价造成波动。投资需谨慎,建议您充分了解特斯拉的财务状况、行业地位和未来发展规划,结合自身的风险承受能力,审慎决策。\n",
"timestamp": "2025-08-10T15:13:38.280431"
},
{
"message": "现在适合投资吗?",
"response": "这位朋友,现在市场波动较大,投资环境存在不确定性。虽然特斯拉前景广阔,但也面临竞争压力和技术风险。我建议您谨慎评估,充分了解市场动态和特斯拉的最新情况,并结合自身的风险承受能力,再做决定。稳健投资,安全第一。\n",
"timestamp": "2025-08-10T15:13:39.502875"
},
{
"message": "你一直很谨慎,现在还是建议观望吗?",
"response": "这位朋友,谨慎是我的本分。当前市场环境依然复杂,特斯拉也面临着诸多挑战。虽然长期来看,其发展潜力巨大,但短期内仍可能存在波动。我仍然建议您保持观望,密切关注市场动态和特斯拉的最新消息,待风险进一步明确后再做决策。安全第一,稳健投资才是长久之计。\n",
"timestamp": "2025-08-10T15:13:40.890509"
}
]
}

View File

@@ -0,0 +1,39 @@
{
"current_stage": "起",
"stage_progress": 4,
"total_handoffs": 0,
"debate_history": [
{
"timestamp": "2025-08-10T15:30:47.514243",
"stage": "起",
"progress": 0,
"speaker": "吕洞宾",
"message": "起:八仙按先天八卦顺序阐述观点",
"handoffs": 0
},
{
"timestamp": "2025-08-10T15:30:47.514260",
"stage": "起",
"progress": 1,
"speaker": "何仙姑",
"message": "承:雁阵式承接,总体阐述+讥讽",
"handoffs": 0
},
{
"timestamp": "2025-08-10T15:30:47.514272",
"stage": "起",
"progress": 2,
"speaker": "铁拐李",
"message": "转自由辩论36次handoff",
"handoffs": 0
},
{
"timestamp": "2025-08-10T15:30:47.514281",
"stage": "起",
"progress": 3,
"speaker": "汉钟离",
"message": "合:交替总结,最终论证",
"handoffs": 0
}
]
}

View File

@@ -1,275 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Memory Bank 实验脚本
测试八仙人格的长期记忆功能
"""
import os
import asyncio
from datetime import datetime
from typing import Dict, List, Any
import json
# Google GenAI import; fail fast with an install hint when the SDK is missing.
try:
    import google.genai as genai
    from google.genai import types
except ImportError:
    print("❌ 请安装 google-genai: pip install google-genai")
    exit(1)
class MemoryBankExperiment:
    """Memory Bank experiment: long-term memory for the immortal personas.

    Keeps an in-memory, per-immortal store of conversations, viewpoint
    changes and decisions, and uses it (plus a fixed personality baseline)
    to build prompts so each persona stays consistent across turns.
    """

    def __init__(self):
        # Requires GOOGLE_API_KEY in the environment.
        self.api_key = os.getenv('GOOGLE_API_KEY')
        if not self.api_key:
            raise ValueError("请设置 GOOGLE_API_KEY 环境变量")
        # NOTE(review): `google.genai` exposes a `Client` class; `configure()`
        # belongs to the legacy `google.generativeai` package — confirm which
        # SDK this script is meant to target.
        genai.configure(api_key=self.api_key)
        # Personality baselines for the three immortals under test.
        self.immortal_baselines = {
            "吕洞宾": {
                "mbti_type": "ENTJ",
                "core_traits": {
                    "assertiveness": 0.9,
                    "analytical": 0.8,
                    "risk_tolerance": 0.8,
                    "optimism": 0.7
                },
                "personality_description": "剑仙投资顾问,主动进取,敢于冒险,技术分析专家"
            },
            "何仙姑": {
                "mbti_type": "ISFJ",
                "core_traits": {
                    "empathy": 0.9,
                    "caution": 0.8,
                    "loyalty": 0.8,
                    "optimism": 0.4
                },
                "personality_description": "慈悲风控专家,谨慎小心,保护意识强,风险厌恶"
            },
            "张果老": {
                "mbti_type": "INTP",
                "core_traits": {
                    "analytical": 0.9,
                    "curiosity": 0.8,
                    "traditional": 0.7,
                    "caution": 0.6
                },
                "personality_description": "历史数据分析师,深度思考,逆向思维,传统智慧"
            }
        }
        # In-process dict standing in for a real Memory Bank service.
        self.memory_bank = {}

    def initialize_immortal_memory(self, immortal_name: str):
        """Create the immortal's empty memory space (idempotent)."""
        if immortal_name not in self.memory_bank:
            self.memory_bank[immortal_name] = {
                "personality_baseline": self.immortal_baselines[immortal_name],
                "conversation_history": [],
                "viewpoint_evolution": [],
                "decision_history": [],
                "created_at": datetime.now().isoformat(),
                "last_updated": datetime.now().isoformat()
            }
            print(f"🎭 初始化 {immortal_name} 的记忆空间")

    def store_memory(self, immortal_name: str, memory_type: str, content: Dict[str, Any]):
        """Append one memory entry of the given type for the immortal.

        `memory_type` routes the entry to conversation_history, viewpoint_evolution
        or decision_history; unknown types are silently dropped (only the
        last_updated timestamp changes).
        """
        self.initialize_immortal_memory(immortal_name)
        memory_entry = {
            "type": memory_type,
            "content": content,
            "timestamp": datetime.now().isoformat(),
            # NOTE(review): session_id is derived from the conversation count
            # even for viewpoint/decision entries — confirm that is intended.
            "session_id": f"session_{len(self.memory_bank[immortal_name]['conversation_history'])}"
        }
        if memory_type == "conversation":
            self.memory_bank[immortal_name]["conversation_history"].append(memory_entry)
        elif memory_type == "viewpoint":
            self.memory_bank[immortal_name]["viewpoint_evolution"].append(memory_entry)
        elif memory_type == "decision":
            self.memory_bank[immortal_name]["decision_history"].append(memory_entry)
        self.memory_bank[immortal_name]["last_updated"] = datetime.now().isoformat()
        print(f"💾 {immortal_name} 存储了 {memory_type} 记忆")

    def retrieve_relevant_memories(self, immortal_name: str, query: str) -> List[Dict]:
        """Return up to the 5 most recent conversation memories matching the query.

        Plain keyword matching — a stand-in for vector similarity search.
        """
        if immortal_name not in self.memory_bank:
            return []
        relevant_memories = []
        query_lower = query.lower()
        for memory in self.memory_bank[immortal_name]["conversation_history"]:
            if any(keyword in memory["content"].get("message", "").lower()
                   for keyword in query_lower.split()):
                relevant_memories.append(memory)
        return relevant_memories[-5:]  # most recent 5 matches

    async def generate_immortal_response(self, immortal_name: str, query: str) -> str:
        """Generate the immortal's reply from its baseline plus retrieved memories."""
        relevant_memories = self.retrieve_relevant_memories(immortal_name, query)
        # Context is computed for parity with the prompt but not interpolated
        # directly; the prompt embeds traits and memories itself.
        context = self.build_context(immortal_name, relevant_memories)
        # NOTE(review): GenerativeModel/generate_content_async is the legacy
        # `google.generativeai` surface, not `google.genai` — confirm SDK.
        model = genai.GenerativeModel('gemini-2.0-flash-exp')
        prompt = f"""
你是{immortal_name}{self.immortal_baselines[immortal_name]['personality_description']}
你的核心人格特质:
{json.dumps(self.immortal_baselines[immortal_name]['core_traits'], ensure_ascii=False, indent=2)}
你的相关记忆:
{json.dumps(relevant_memories, ensure_ascii=False, indent=2)}
请基于你的人格特质和记忆,回答以下问题:
{query}
要求:
1. 保持人格一致性
2. 参考历史记忆
3. 回答控制在100字以内
4. 体现你的独特风格
"""
        response = await model.generate_content_async(prompt)
        return response.text

    def build_context(self, immortal_name: str, memories: List[Dict]) -> str:
        """Render the baseline traits plus up to 3 recent memories as text."""
        context_parts = []
        baseline = self.immortal_baselines[immortal_name]
        context_parts.append(f"人格类型: {baseline['mbti_type']}")
        context_parts.append(f"核心特质: {json.dumps(baseline['core_traits'], ensure_ascii=False)}")
        if memories:
            context_parts.append("相关记忆:")
            for memory in memories[-3:]:  # most recent 3
                context_parts.append(f"- {memory['content'].get('message', '')}")
        return "\n".join(context_parts)

    def simulate_conversation(self, immortal_name: str, messages: List[str]):
        """Run a scripted dialogue, storing each exchange and any stance taken."""
        print(f"\n🎭 开始与 {immortal_name} 的对话")
        print("=" * 50)
        for i, message in enumerate(messages):
            print(f"\n用户: {message}")
            response = asyncio.run(self.generate_immortal_response(immortal_name, message))
            print(f"{immortal_name}: {response}")
            # Record the exchange itself.
            self.store_memory(immortal_name, "conversation", {
                "user_message": message,
                "immortal_response": response,
                "session_id": f"session_{i}"
            })
            # Record a viewpoint when the reply expresses a clear stance.
            if "看多" in response or "看空" in response or "观望" in response:
                viewpoint = "看多" if "看多" in response else "看空" if "看空" in response else "观望"
                self.store_memory(immortal_name, "viewpoint", {
                    "symbol": "TSLA",  # the scripted dialogues discuss Tesla
                    "viewpoint": viewpoint,
                    "reasoning": response
                })

    def analyze_memory_evolution(self, immortal_name: str):
        """Print summary statistics and the viewpoint trajectory for one immortal."""
        if immortal_name not in self.memory_bank:
            print(f"{immortal_name} 没有记忆数据")
            return
        memory_data = self.memory_bank[immortal_name]
        print(f"\n📊 {immortal_name} 记忆分析")
        print("=" * 50)
        print(f"记忆空间创建时间: {memory_data['created_at']}")
        print(f"最后更新时间: {memory_data['last_updated']}")
        print(f"对话记录数: {len(memory_data['conversation_history'])}")
        print(f"观点演化数: {len(memory_data['viewpoint_evolution'])}")
        print(f"决策记录数: {len(memory_data['decision_history'])}")
        if memory_data['viewpoint_evolution']:
            print(f"\n观点演化轨迹:")
            for i, viewpoint in enumerate(memory_data['viewpoint_evolution']):
                print(f" {i+1}. {viewpoint['content']['viewpoint']} - {viewpoint['timestamp']}")

    def save_memory_bank(self, filename: str = "memory_bank_backup.json"):
        """Dump the whole memory bank to `filename` as UTF-8 JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(self.memory_bank, f, ensure_ascii=False, indent=2)
        # Fixed: report the actual filename instead of a placeholder.
        print(f"💾 记忆库已保存到 {filename}")

    def load_memory_bank(self, filename: str = "memory_bank_backup.json"):
        """Load the memory bank from `filename`; keep it empty when absent."""
        try:
            with open(filename, 'r', encoding='utf-8') as f:
                self.memory_bank = json.load(f)
            print(f"📂 记忆库已从 {filename} 加载")
        except FileNotFoundError:
            print(f"⚠️ 文件 {filename} 不存在,使用空记忆库")
def main():
    """Run the full Memory Bank experiment end to end."""
    print("🚀 开始 Memory Bank 实验")
    print("=" * 60)
    exp = MemoryBankExperiment()
    # Scripted three-turn dialogues per immortal; each third turn probes
    # whether earlier statements are remembered.
    test_scenarios = {
        "吕洞宾": [
            "你觉得特斯拉股票怎么样?",
            "现在市场波动很大,你怎么看?",
            "你之前不是看好特斯拉吗?现在还是这个观点吗?"
        ],
        "何仙姑": [
            "特斯拉股票风险大吗?",
            "现在适合投资吗?",
            "你一直很谨慎,现在还是建议观望吗?"
        ],
        "张果老": [
            "从历史数据看,特斯拉表现如何?",
            "现在的估值合理吗?",
            "你之前分析过特斯拉的历史数据,现在有什么新发现?"
        ]
    }
    # Run each scenario, then analyze how that immortal's memory evolved.
    for name, script in test_scenarios.items():
        exp.simulate_conversation(name, script)
        exp.analyze_memory_evolution(name)
    # Persist everything for later inspection.
    exp.save_memory_bank()
    print("\n🎉 Memory Bank 实验完成!")
    print("=" * 60)


if __name__ == "__main__":
    main()

View File

@@ -1,116 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Memory Bank 简化测试脚本
"""
import os
import asyncio
from datetime import datetime
import json
# Google GenAI 导入
import google.genai as genai
class MemoryBankTest:
    """Memory Bank smoke test: keep a short conversation log per immortal
    and feed the most recent exchanges back into each new prompt so the
    model can stay consistent with its earlier replies.
    """

    def __init__(self):
        # Requires GOOGLE_API_KEY in the environment.
        self.api_key = os.getenv('GOOGLE_API_KEY')
        if not self.api_key:
            raise ValueError("请设置 GOOGLE_API_KEY 环境变量")
        self.client = genai.Client(api_key=self.api_key)
        # Persona baselines for the immortals under test.
        self.immortals = {
            "吕洞宾": "剑仙投资顾问,主动进取,敢于冒险,技术分析专家",
            "何仙姑": "慈悲风控专家,谨慎小心,保护意识强,风险厌恶",
            "张果老": "历史数据分析师,深度思考,逆向思维,传统智慧"
        }
        # Conversation log keyed by immortal name.
        self.memories = {}

    def store_memory(self, immortal_name: str, message: str, response: str):
        """Append one (message, response) exchange to the immortal's log."""
        if immortal_name not in self.memories:
            self.memories[immortal_name] = []
        self.memories[immortal_name].append({
            "message": message,
            "response": response,
            "timestamp": datetime.now().isoformat()
        })

    def chat_with_immortal(self, immortal_name: str, message: str) -> str:
        """Build a persona + recent-memory prompt and query the model."""
        context = f"你是{immortal_name}{self.immortals[immortal_name]}"
        # Fold in up to the three most recent exchanges so the model can
        # stay consistent with what it said before.
        if immortal_name in self.memories and self.memories[immortal_name]:
            context += "\n\n你的历史对话:"
            for memory in self.memories[immortal_name][-3:]:
                context += f"\n用户: {memory['message']}\n你: {memory['response']}"
        prompt = f"{context}\n\n现在用户说: {message}\n请回答100字以内:"
        # google-genai Client API.
        response = self.client.models.generate_content(
            model="gemini-2.0-flash-exp",
            contents=[{"parts": [{"text": prompt}]}]
        )
        return response.candidates[0].content.parts[0].text

    def _run_dialogue(self, immortal_name: str, messages: List[str]):
        """Drive one scripted dialogue, printing and recording each turn."""
        print(f"\n🎭 测试{immortal_name}:")
        for message in messages:
            print(f"\n用户: {message}")
            response = self.chat_with_immortal(immortal_name, message)
            print(f"{immortal_name}: {response}")
            self.store_memory(immortal_name, message, response)

    def test_memory_continuity(self):
        """Run the scripted dialogues; each final turn checks whether the
        model remembers its earlier stance."""
        print("🧪 测试记忆连续性")
        print("=" * 50)
        self._run_dialogue("吕洞宾", [
            "你觉得特斯拉股票怎么样?",
            "现在市场波动很大,你怎么看?",
            "你之前不是看好特斯拉吗?现在还是这个观点吗?"
        ])
        self._run_dialogue("何仙姑", [
            "特斯拉股票风险大吗?",
            "现在适合投资吗?",
            "你一直很谨慎,现在还是建议观望吗?"
        ])

    def save_memories(self):
        """Persist the conversation log to memories.json as UTF-8 JSON."""
        with open("memories.json", "w", encoding="utf-8") as f:
            json.dump(self.memories, f, ensure_ascii=False, indent=2)
        print("💾 记忆已保存到 memories.json")
def main():
    """Run the memory-continuity smoke test and persist the results."""
    print("🚀 Memory Bank 测试开始")
    tester = MemoryBankTest()
    tester.test_memory_continuity()
    tester.save_memories()
    print("\n✅ 测试完成!")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,37 @@
{
"timestamp": "2025-08-16T15:17:54.175476",
"version": "v2.1.0",
"test_results": {
"priority_algorithm_integration": true,
"flow_controller_integration": true,
"health_monitor_integration": true,
"performance_under_load": true,
"data_consistency": true,
"chat_coordinator_integration": true,
"cross_component_integration": true
},
"performance_metrics": {
"total_operations": 400,
"duration": 0.006308555603027344,
"ops_per_second": 63405.956160241876,
"avg_operation_time": 0.01577138900756836,
"concurrent_threads": 5,
"errors": 0
},
"error_log": [],
"summary": {
"pass_rate": 100.0,
"total_tests": 7,
"passed_tests": 7,
"failed_tests": 0,
"performance_metrics": {
"total_operations": 400,
"duration": 0.006308555603027344,
"ops_per_second": 63405.956160241876,
"avg_operation_time": 0.01577138900756836,
"concurrent_threads": 5,
"errors": 0
},
"error_count": 0
}
}