liurenchaxin/modules/legacy-support/litellm/list_models.py

import asyncio

from openai import AsyncOpenAI
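
# LiteLLM's proxy exposes an OpenAI-compatible API, so the standard
# openai SDK works against it once base_url is pointed at the proxy.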
async def list_available_models():
    """Fetch the list of models supported by the LiteLLM server."""
    print("Fetching the list of available models...")

    # Use the remote LiteLLM server instead of api.openai.com
    client = AsyncOpenAI(
        api_key="sk-0jdcGHZJpX2oUJmyEs7zVA",
        base_url="https://litellm.seekkey.tech"
    )

    try:
        # Fetch the model list from the proxy
        models = await client.models.list()

        print("\n=== Available models ===")
        for model in models.data:
            print(f"- {model.id}")
        print(f"\nFound {len(models.data)} models in total")

        # Smoke-test the first model with a trivial completion
        if models.data:
            first_model = models.data[0].id
            print(f"\nTesting the first model: {first_model}")

            response = await client.chat.completions.create(
                model=first_model,
                messages=[
                    {"role": "user", "content": "Hello, please say hi in Chinese."}
                ],
                max_tokens=50
            )
            print(f"Test response: {response.choices[0].message.content}")
    except Exception as e:
        print(f"Failed to fetch the model list: {e}")
        print(f"Error type: {type(e).__name__}")

        # Fall back to probing a few common model names directly,
        # stopping at the first one that responds
        common_models = ["gpt-4", "gpt-3.5-turbo", "gemini-pro", "claude-3-sonnet"]
        print("\nTrying common models...")
        for model in common_models:
            try:
                print(f"Testing model: {model}")
                response = await client.chat.completions.create(
                    model=model,
                    messages=[{"role": "user", "content": "Hi"}],
                    max_tokens=10
                )
                print(f"{model} is available")
                break
            except Exception as model_error:
                print(f"{model} is unavailable: {str(model_error)[:100]}...")
    finally:
        # Always close the client to release the underlying HTTP connection
        await client.close()


if __name__ == "__main__":
    asyncio.run(list_available_models())
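
# Usage: run directly against the LiteLLM proxy configured above.
#   $ python list_models.py
# A successful run prints the model list and one short test completion;
# if listing fails, the script falls back to probing the common model names.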