# Smoke-test script: verify connectivity to a remote LiteLLM server
# using the OpenAI SDK (no MCP involved).
import asyncio
import os

from openai import AsyncOpenAI
async def main() -> None:
    """Smoke-test the remote LiteLLM server with a single chat completion (no MCP).

    Prints a success line with the model's reply, or an error line if any part
    of the request (DNS, TLS, auth, model routing) fails.
    """
    # Prefer a key supplied via the environment; fall back to the historical
    # hard-coded value so existing invocations keep working.
    # NOTE(review): a live-looking credential was committed here — rotate it
    # and drop the fallback once LITELLM_API_KEY is set everywhere.
    client = AsyncOpenAI(
        api_key=os.environ.get("LITELLM_API_KEY", "sk-0jdcGHZJpX2oUJmyEs7zVA"),
        base_url="https://litellm.seekkey.tech",
    )

    try:
        # A tiny, bounded request is enough to prove end-to-end connectivity.
        response = await client.chat.completions.create(
            model="gemini/gemini-2.5-flash",
            messages=[
                {"role": "user", "content": "Hello! Please respond with a simple greeting."}
            ],
            max_tokens=50,
        )

        print("✅ Remote LiteLLM server is working!")
        print(f"Response: {response.choices[0].message.content}")

    except Exception as e:
        # Broad catch is deliberate: this is a top-level smoke test that should
        # report any failure mode rather than crash with a traceback.
        print(f"❌ Error connecting to remote server: {e}")
|
|
|
|
if __name__ == "__main__":
    # Run the async smoke test only when executed as a script.
    asyncio.run(main())