# Smoke-test script for a remote LiteLLM proxy (gpt-5-nano with DeepSeek-V3 fallback).
import asyncio

from openai import AsyncOpenAI
async def test_gpt5_nano():
    """Smoke-test the gpt-5-nano model behind a remote LiteLLM proxy.

    Sends one chat-completion request to ``gpt-5-nano``; if that fails,
    falls back to a DeepSeek-V3 deployment on the same proxy so the proxy
    itself can still be exercised. Results and errors are printed; the
    coroutine returns None.
    """
    print("正在测试gpt5-nano模型...")

    # Remote LiteLLM server.
    # SECURITY: the literal key below was committed to source control and
    # should be rotated; prefer supplying LITELLM_API_KEY via the environment.
    # The literal fallback keeps the original behavior for existing users.
    import os
    client = AsyncOpenAI(
        api_key=os.environ.get("LITELLM_API_KEY", "sk-0jdcGHZJpX2oUJmyEs7zVA"),
        base_url="https://litellm.seekkey.tech",
    )

    try:
        # gpt-5-* models take max_completion_tokens rather than max_tokens.
        response = await client.chat.completions.create(
            model="gpt-5-nano",
            messages=[
                {"role": "user", "content": "你好,请简单介绍一下你自己。"}
            ],
            max_completion_tokens=150,
            temperature=0.7,
        )

        print("\n=== GPT-5-Nano 响应 ===")
        print(f"模型: {response.model}")
        print(f"响应内容: {response.choices[0].message.content}")
        print(f"Token使用: {response.usage.total_tokens if response.usage else 'N/A'}")

    except Exception as e:
        # Report the primary failure in full before attempting the fallback.
        print(f"调用失败: {e}")
        print(f"错误类型: {type(e).__name__}")
        import traceback
        print(f"详细错误信息: {traceback.format_exc()}")

        # Try another model hosted on the same proxy.
        print("\n尝试使用其他模型...")
        try:
            response = await client.chat.completions.create(
                model="fireworks_ai/accounts/fireworks/models/deepseek-v3-0324",
                messages=[
                    {"role": "user", "content": "你好,请简单介绍一下你自己。"}
                ],
                max_tokens=150,
                temperature=0.7,
            )
            print("\n=== DeepSeek-V3 响应 ===")
            print(f"模型: {response.model}")
            print(f"响应内容: {response.choices[0].message.content}")
            print(f"Token使用: {response.usage.total_tokens if response.usage else 'N/A'}")
        except Exception as fallback_error:
            # Best-effort diagnostics only; swallow so `finally` still closes.
            print(f"备用模型也失败: {fallback_error}")

    finally:
        # Always release the underlying HTTP connection pool.
        await client.close()
 | 
						|
 | 
						|
# Script entry point: run the async smoke test to completion.
if __name__ == "__main__":
    asyncio.run(test_gpt5_nano())