#!/usr/bin/env python3
"""
Test LightRAG server with Ollama rerank integration
"""

import requests
import json
import time
import sys
import os


def test_server_health():
    """Probe the LightRAG /health endpoint; True when it answers 200."""
    print("=== Testing LightRAG Server Health ===")
    try:
        resp = requests.get("http://localhost:3015/health", timeout=5)
        if resp.status_code != 200:
            print(f"❌ Server returned status {resp.status_code}")
            return False
        payload = resp.json()
        print(f"✅ Server is running: {payload.get('status', 'unknown')}")
        print(f" Version: {payload.get('version', 'unknown')}")
        return True
    except Exception as exc:
        # Connection refused, timeout, or malformed JSON all land here.
        print(f"❌ Cannot connect to server: {exc}")
        return False


def test_server_config():
    """True when the server's /config reports 'ollama' as the rerank binding."""
    print("\n=== Testing Server Configuration ===")
    try:
        resp = requests.get("http://localhost:3015/config", timeout=5)
        if resp.status_code != 200:
            print(f"❌ Could not fetch config: {resp.status_code}")
            return False
        binding = resp.json().get('rerank_binding', 'unknown')
        print(f"✅ Rerank binding: {binding}")
        if binding != 'ollama':
            print(f"❌ Server is using {binding}, not ollama")
            return False
        print("✅ Server is configured for Ollama rerank!")
        return True
    except Exception as exc:
        print(f"❌ Error fetching config: {exc}")
        return False


def test_ollama_connection():
    """True when Ollama is reachable and has a Jina reranker model pulled."""
    print("\n=== Testing Ollama Connection ===")
    try:
        resp = requests.get("http://localhost:11434/api/tags", timeout=5)
        if resp.status_code != 200:
            print(f"❌ Ollama server returned status {resp.status_code}")
            return False
        installed = resp.json().get("models", [])
        # First installed model whose name mentions the Jina reranker, if any.
        jina = next(
            (m for m in installed if 'jina-reranker' in m.get('name', '')),
            None,
        )
        if jina is None:
            print("❌ No Jina rerank models found in Ollama")
            return False
        print(f"✅ Ollama is running with Jina rerank model: {jina['name']}")
        return True
    except Exception as exc:
        print(f"❌ Cannot connect to Ollama: {exc}")
        return False
def test_rerank_functionality():
    """Send one query through the LightRAG API and look for rerank scores."""
    print("\n=== Testing Rerank Functionality ===")

    # A throwaway query: all we care about is whether returned chunks carry
    # relevance scores, which would indicate the reranker actually ran.
    query_body = {
        "query": "What is artificial intelligence?",
        "workspace": "default",
        "top_k": 5,
        "history_turns": 0
    }

    try:
        print("Sending test query to LightRAG...")
        started = time.time()
        resp = requests.post(
            "http://localhost:3015/api/query",
            json=query_body,
            headers={"Content-Type": "application/json"},
            timeout=30,
        )
        elapsed = time.time() - started

        if resp.status_code != 200:
            print(f"❌ Query failed with status {resp.status_code}")
            print(f"Response: {resp.text[:200]}")
            return False

        payload = resp.json()
        print(f"✅ Query successful (took {elapsed:.2f}s)")

        if 'reranked_chunks' not in payload and 'chunks' not in payload:
            print("⚠️ No chunks in response")
            return True

        chunks = payload.get('reranked_chunks', payload.get('chunks', []))
        if not chunks:
            # Empty result set usually just means no documents are indexed.
            print("⚠️ No chunks returned (may be no documents in system)")
            return True

        print(f"✅ Retrieved {len(chunks)} chunks")
        # A score field on the top chunk is the signal that reranking happened.
        top = chunks[0]
        if 'score' in top or 'relevance_score' in top:
            print("✅ Rerank scores present in results")
            return True
        print("⚠️ No rerank scores in results (may be using null rerank)")
        return False
    except Exception as exc:
        print(f"❌ Error during query test: {exc}")
        import traceback
        traceback.print_exc()
        return False


def test_direct_rerank_api():
    """Look for a rerank route in the OpenAPI spec and exercise it if present.

    Informational check: most failure modes return True rather than False.
    """
    print("\n=== Testing Direct Rerank API ===")
    try:
        # The OpenAPI document tells us whether a rerank route is exposed.
        resp = requests.get("http://localhost:3015/openapi.json", timeout=5)
        if resp.status_code != 200:
            print(f"⚠️ Could not fetch OpenAPI: {resp.status_code}")
            return True

        spec_paths = resp.json().get('paths', {})
        rerank_routes = [route for route in spec_paths.keys() if 'rerank' in route.lower()]
        if not rerank_routes:
            print("⚠️ No rerank endpoints in OpenAPI (may be internal only)")
            return True

        print(f"✅ Rerank endpoints found: {rerank_routes}")
        sample_request = {
            "query": "test query",
            "documents": [
                "Artificial intelligence is the simulation of human intelligence.",
                "Machine learning is a subset of AI.",
                "Deep learning uses neural networks."
            ]
        }
        # Exercise only the first advertised rerank route.
        route = rerank_routes[0]
        print(f"Testing endpoint: {route}")
        rerank_resp = requests.post(
            f"http://localhost:3015{route}",
            json=sample_request,
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
        if rerank_resp.status_code != 200:
            print(f"⚠️ Direct rerank API returned {rerank_resp.status_code}")
            return False
        ranked = rerank_resp.json()
        print(f"✅ Direct rerank API works! Got {len(ranked.get('results', []))} results")
        return True
    except Exception as exc:
        # Not critical — this probe must never fail the whole run.
        print(f"⚠️ Error testing direct rerank API: {exc}")
        return True


def main():
    """Run every check, print a pass/fail summary, and return the exit code."""
    print("LightRAG Ollama Rerank Integration Test")
    print("=" * 60)

    # Checks that talk to LightRAG are skipped when the server is down;
    # the Ollama probe is independent and always runs.
    health_ok = test_server_health()
    config_ok = test_server_config() if health_ok else False
    ollama_ok = test_ollama_connection()
    rerank_ok = test_rerank_functionality() if health_ok else False
    direct_ok = test_direct_rerank_api() if health_ok else False

    print("\n" + "=" * 60)
    print("TEST RESULTS SUMMARY")
    print("=" * 60)
    outcomes = {
        "Server Health": health_ok,
        "Ollama Configuration": config_ok,
        "Ollama Connection": ollama_ok,
        "Rerank Functionality": rerank_ok,
        "Direct Rerank API": direct_ok
    }
    all_passed = all(outcomes.values())
    for label, ok in outcomes.items():
        print(f"{label:25} {'✅ PASS' if ok else '❌ FAIL'}")

    print("\n" + "=" * 60)
    if all_passed:
        print("🎉 ALL TESTS PASSED! Ollama rerank is working correctly.")
    else:
        print("⚠️ SOME TESTS FAILED. Review output above.")

    print("\n" + "=" * 60)
    print("NEXT STEPS:")
    print("1. If server is not running, start it with: cd LightRAG-main && python start_server.py")
    print("2. Or use the batch file: cd LightRAG-main && zrun.bat")
    print("3. Verify Ollama has jina-reranker-v2:latest model")
    print("4. Test with actual documents in the inputs folder")

    return 0 if all_passed else 1


if __name__ == "__main__":
    sys.exit(main())