#!/usr/bin/env python3
"""
Verify that Ollama is correctly detected and available
This script tests multiple scenarios that could cause "ollama not available" errors
"""
import requests
import sys
import os
def test_direct_connection():
    """Probe the Ollama HTTP API on both loopback spellings.

    Returns a ``(reachable, base_url)`` pair: ``(True, url)`` for the
    first endpoint that answers ``/api/tags`` with HTTP 200, otherwise
    ``(False, None)``.
    """
    print("1. Testing direct connection to Ollama...")
    # Some environments resolve "localhost" differently from 127.0.0.1,
    # so both spellings of the loopback address are probed in turn.
    candidates = (
        ("http://127.0.0.1:11434", "127.0.0.1"),
        ("http://localhost:11434", "localhost"),
    )
    for base_url, label in candidates:
        try:
            resp = requests.get(f"{base_url}/api/tags", timeout=5)
        except Exception as e:
            # Connection refused / timeout etc. — report and try the
            # next candidate address.
            print(f"{label}: {type(e).__name__}: {e}")
            continue
        if resp.status_code == 200:
            print(f"{label}: Connection successful (HTTP {resp.status_code})")
            return True, base_url
        print(f"{label}: HTTP {resp.status_code}")
    return False, None
def test_required_models(host_url):
    """Query ``{host_url}/api/tags`` and report which expected models exist.

    Returns True only when both hard-required models
    (``snowflake-arctic-embed:latest`` and ``jina-reranker-v2:latest``)
    are installed; the alternatives only produce warnings.
    """
    print("\n2. Checking for required models...")
    required_models = [
        'snowflake-arctic-embed:latest',
        'jina-reranker-v2:latest',
        'snowflake-arctic-embed2:latest',  # Alternative
        'bge-m3:latest',  # Alternative
    ]
    try:
        resp = requests.get(f"{host_url}/api/tags", timeout=5)
        if resp.status_code != 200:
            print(f" ❌ Failed to get models: HTTP {resp.status_code}")
            return False
        installed = resp.json().get('models', [])
        names = [entry['name'] for entry in installed]
        print(f" Found {len(installed)} models total")
        all_found = True
        for wanted in required_models:
            if any(wanted in name for name in names):
                print(f"{wanted}: Available")
            else:
                print(f" ⚠️ {wanted}: Not found (but may not be required)")
                # Only the first two entries are mandatory; the
                # alternatives are informational.
                if wanted in ['snowflake-arctic-embed:latest', 'jina-reranker-v2:latest']:
                    all_found = False
        return all_found
    except Exception as e:
        print(f" ❌ Error checking models: {e}")
        return False
def test_lightrag_configuration():
    """Report Ollama-related configuration for LightRAG.

    Prints the relevant environment variables (or the defaults that
    would be used when unset) and makes a best-effort attempt to read
    ``OLLAMA_URL`` from the optional LightRAG production config.

    All findings here are warnings only, so the function always
    returns True.  (The original implementation carried an ``all_good``
    flag that was never cleared, making the return value constant;
    the dead flag has been removed and the contract made explicit.)
    """
    print("\n3. Checking LightRAG configuration...")
    # Environment variables consulted for Ollama endpoints, paired with
    # the default applied when the variable is unset.
    env_vars = [
        ('LLM_BINDING_HOST', 'http://localhost:11434'),
        ('EMBEDDING_BINDING_HOST', 'http://localhost:11434'),
        ('OLLAMA_URL', 'http://127.0.0.1:11434'),
    ]
    for var_name, default_value in env_vars:
        value = os.environ.get(var_name)
        if value:
            print(f"{var_name} = {value}")
        else:
            print(f" ⚠️ {var_name} not set (will use default: {default_value})")
    # Best-effort peek at the packaged production config; absence is
    # not an error because the package may not be installed.
    try:
        sys.path.insert(0, 'LightRAG-main')
        from lightrag.production_config import config
        ollama_url = config.models.OLLAMA_URL
        print(f" ✅ LightRAG config OLLAMA_URL = {ollama_url}")
    except ImportError:
        print(" ⚠️ Could not import LightRAG config")
    except Exception as e:
        print(f" ⚠️ Error reading LightRAG config: {e}")
    return True
def test_common_scripts():
    """Replay the Ollama check performed by test_production_workflow_auth.py.

    Returns True when 127.0.0.1:11434 answers ``/api/tags`` and both
    models that script insists on are installed; False otherwise.
    """
    print("\n4. Testing common script detection...")
    print(" Simulating test_production_workflow_auth.py check...")
    try:
        resp = requests.get("http://127.0.0.1:11434/api/tags", timeout=10)
        if resp.status_code != 200:
            print(f" ❌ Script would fail: HTTP {resp.status_code}")
            return False
        names = [entry['name'] for entry in resp.json().get('models', [])]
        # These are the models the simulated script treats as mandatory.
        for needed in ['snowflake-arctic-embed:latest', 'jina-reranker-v2:latest']:
            if not any(needed in name for name in names):
                print(f" ❌ Script would NOT find: {needed}")
                return False
            print(f" ✅ Script would find: {needed}")
        print(" ✅ Script would pass Ollama check")
        return True
    except Exception as e:
        print(f" ❌ Script would fail with error: {e}")
        return False
def main():
    """Run every verification step and exit 0 on success, 1 on failure.

    Connectivity is a hard prerequisite: without it the script exits
    immediately.  Of the remaining checks, only the script-detection
    result gates the final exit status; model/config results are
    reported as warnings in the summary.
    """
    banner = "=" * 60
    print(banner)
    print("VERIFYING OLLAMA AVAILABILITY FIX")
    print(banner)
    # Step 1: direct connection — abort early if Ollama is unreachable.
    connection_ok, host_url = test_direct_connection()
    if not connection_ok:
        print("\n❌ CRITICAL: Cannot connect to Ollama at all")
        print(" Ollama might not be running or is on a different port")
        print(" Check: Is Ollama running? Try: ollama serve")
        sys.exit(1)
    # Steps 2-4: model availability, configuration, and the check other
    # scripts perform.
    models_ok = test_required_models(host_url)
    config_ok = test_lightrag_configuration()
    script_ok = test_common_scripts()
    print("\n" + banner)
    print("VERIFICATION SUMMARY")
    print(banner)
    print(f"✅ Direct connection: {'PASS' if connection_ok else 'FAIL'}")
    print(f"✅ Required models: {'PASS' if models_ok else 'WARNING (some missing)'}")
    print(f"✅ Configuration: {'PASS' if config_ok else 'WARNING'}")
    print(f"✅ Script detection: {'PASS' if script_ok else 'FAIL'}")
    if connection_ok and script_ok:
        print("\n🎉 SUCCESS: Ollama is correctly available and detectable")
        print(" The 'ollama not available' error was likely transient or")
        print(" from a different context (e.g., LightRAG server startup).")
        print("\n💡 If you still see 'ollama not available' errors:")
        print(" 1. Check that Ollama is running: ollama serve")
        print(" 2. Check firewall/network settings")
        print(" 3. Try restarting Ollama: ollama stop && ollama serve")
        sys.exit(0)
    else:
        print("\n⚠️ WARNING: Some checks failed")
        print(" Ollama is running but there might be configuration issues")
        sys.exit(1)
# Run the verification suite only when executed as a script (not on import).
if __name__ == "__main__":
    main()