#!/usr/bin/env python3
"""
Direct DeepSeek API Test to Identify Root Cause
"""
import asyncio
import json
import os

import requests
from openai import AsyncOpenAI, OpenAI

def test_api_keys(api_keys=None):
    """Test DeepSeek API keys directly, via raw HTTP and the OpenAI client.

    Args:
        api_keys: Optional list of key strings to probe. Defaults to the two
            keys found in the project configuration (zrun.bat and .env).
    """
    print("=== Testing DeepSeek API Keys Directly ===")

    # SECURITY NOTE(review): live-looking API keys are committed in source.
    # Rotate them and load them from the environment instead of hard-coding.
    if api_keys is None:
        api_keys = [
            "sk-55f6e57f1d834b0e93ceaf98cc2cb715",  # From zrun.bat
            "sk-338f965efd9e4ae79538ceb0b6b0f717",  # From .env
        ]

    base_url = "https://api.deepseek.com/v1"

    for api_key in api_keys:
        print(f"\n--- Testing API Key: {api_key[:20]}... ---")
        _probe_key_via_http(api_key, base_url)
        _probe_key_via_openai_client(api_key, base_url)


def _probe_key_via_http(api_key, base_url):
    """Send one minimal chat completion over raw HTTP and report the result."""
    try:
        response = requests.post(
            f"{base_url}/chat/completions",
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            json={
                "model": "deepseek-chat",
                "messages": [{"role": "user", "content": "Hello, test message"}],
                "max_tokens": 50,
            },
            timeout=30,
        )
        print(f"HTTP Status: {response.status_code}")
        if response.status_code == 200:
            print("✅ API Key works!")
            result = response.json()
            content = (
                result.get("choices", [{}])[0]
                .get("message", {})
                .get("content", "No content")
            )
            print(f"Response: {content}")
        else:
            print(f"❌ API Error: {response.text}")
    except Exception as e:
        # Best-effort diagnostic: report the failure and move on.
        print(f"❌ HTTP Request failed: {e}")


def _probe_key_via_openai_client(api_key, base_url):
    """Send one minimal chat completion through the sync OpenAI client."""
    try:
        # timeout added so a wedged connection cannot hang the diagnostic run.
        client = OpenAI(api_key=api_key, base_url=base_url, timeout=30)
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": "Test message"}],
            max_tokens=50,
        )
        print("✅ OpenAI Client works!")
        print(f"Response: {response.choices[0].message.content}")
    except Exception as e:
        print(f"❌ OpenAI Client failed: {e}")
def test_lightrag_configuration():
    """Print which LightRAG-related environment variables are set.

    Values longer than 50 characters (API keys, URLs) are truncated with a
    trailing ellipsis so secrets don't flood the output.
    """
    print("\n=== Testing LightRAG Configuration ===")

    env_vars = [
        "OPENAI_API_KEY",
        "OPENAI_BASE_URL",
        "LLM_MODEL",
        "LLM_BINDING",
        "EMBEDDING_MODEL",
    ]

    for var in env_vars:
        value = os.getenv(var)
        if value:
            # os.getenv already returns a str here, so no str() cast is needed.
            shown = f"{value[:50]}..." if len(value) > 50 else value
            print(f"✅ {var}: {shown}")
        else:
            print(f"❌ {var}: Not set")
def test_server_configuration():
    """Probe the local LightRAG server: health check, then the search endpoint."""
    print("\n=== Testing Server Configuration ===")

    try:
        # Timeout added: a request with no timeout can hang this script forever.
        response = requests.get("http://localhost:3015/health", timeout=10)
        print(f"Server Health: {response.status_code}")

        if response.status_code == 200:
            # Only exercise search when the server reports healthy.
            search_data = {
                "query": "artificial intelligence",
                "top_k": 3,
                "mode": "hybrid",
            }
            response = requests.post(
                "http://localhost:3015/api/search",
                json=search_data,
                timeout=30,
            )
            print(f"Search Endpoint: {response.status_code}")
            if response.status_code != 200:
                print(f"Search Error: {response.text}")
    except Exception as e:
        # Best-effort diagnostic: report and continue with the other checks.
        print(f"❌ Server test failed: {e}")
async def test_openai_client_config():
    """Exercise the async OpenAI client factory that LightRAG itself uses."""
    print("\n=== Testing LightRAG OpenAI Client Configuration ===")

    try:
        from lightrag.llm.openai import create_openai_async_client
    except ImportError as e:
        # Robustness: don't abort the whole diagnostic run when LightRAG
        # isn't importable in this environment.
        print(f"❌ Could not import LightRAG's client factory: {e}")
        return

    # SECURITY NOTE(review): hard-coded keys — rotate and move to env vars.
    api_keys = [
        "sk-55f6e57f1d834b0e93ceaf98cc2cb715",
        "sk-338f965efd9e4ae79538ceb0b6b0f717",
    ]
    base_url = "https://api.deepseek.com/v1"

    for api_key in api_keys:
        print(f"\n--- Testing with API Key: {api_key[:20]}... ---")
        try:
            # Build the client exactly the way LightRAG does internally.
            client = create_openai_async_client(
                api_key=api_key,
                base_url=base_url,
            )
            response = await client.chat.completions.create(
                model="deepseek-chat",
                messages=[{"role": "user", "content": "Hello, test message"}],
                max_tokens=50,
            )
            print("✅ LightRAG OpenAI Client works!")
            print(f"Response: {response.choices[0].message.content}")
        except Exception as e:
            print(f"❌ LightRAG OpenAI Client failed: {e}")
def main():
    """Run every diagnostic probe, then print the root-cause summary report."""
    print("=== DEEPSEEK API ROOT CAUSE ANALYSIS ===")

    # Each probe handles and reports its own failures, so they always all run.
    test_api_keys()
    test_lightrag_configuration()
    test_server_configuration()
    asyncio.run(test_openai_client_config())

    bar = "=" * 50
    report = [
        "\n" + bar,
        "=== ROOT CAUSE IDENTIFICATION ===",
        bar,
        "\n1. API Key Conflict:",
        "   - zrun.bat uses: sk-55f6e57f1d834b0e93ceaf98cc2cb715",
        "   - .env uses: sk-338f965efd9e4ae79538ceb0b6b0f717",
        "   - This creates environment variable conflicts",
        "\n2. Regional Restriction:",
        "   - DeepSeek API blocks requests from certain regions",
        "   - Error: 'unsupported_country_region_territory'",
        "   - This is IP-based geolocation blocking",
        "\n3. Header Configuration:",
        "   - Fixed in openai.py to use dynamic headers",
        "   - Only adds DeepSeek headers when calling DeepSeek API",
        "\n=== RECOMMENDED FIX ===",
        "1. Use consistent API key across all configurations",
        "2. Consider using VPN/Proxy for DeepSeek API access",
        "3. Or switch to alternative LLM provider",
        "4. Test both API keys to see which one works",
    ]
    for line in report:
        print(line)
# Script entry point: run the full diagnostic suite.
if __name__ == "__main__":
    main()