#!/usr/bin/env python3
|
|
"""
|
|
Start LightRAG server with our fixed OCR processor using GPU mode
|
|
"""
|
|
|
|
import os
|
|
import sys
|
|
import subprocess
|
|
|
|
# Set environment variables for CUDA 11.8 (PaddlePaddle 2.6.0 compatible)
|
|
os.environ['CUDA_PATH'] = r'C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8'
|
|
os.environ['CUDA_HOME'] = os.environ['CUDA_PATH']
|
|
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
|
|
os.environ['LIGHTRAG_OCR_ENGINE'] = 'paddleocr'
|
|
|
|
# Add CUDA 11.8 to PATH for PaddleOCR GPU acceleration
|
|
cuda_bin = os.path.join(os.environ['CUDA_PATH'], 'bin')
|
|
os.environ['PATH'] = cuda_bin + ';' + os.environ['PATH']
|
|
|
|
# Set DeepSeek API configuration for LLM
|
|
os.environ['OPENAI_API_KEY'] = 'sk-55f6e57f1d834b0e93ceaf98cc2cb715'
|
|
os.environ['OPENAI_API_MODEL'] = 'deepseek-chat'
|
|
os.environ['OPENAI_API_BASE'] = 'https://api.deepseek.com/v1'
|
|
os.environ['PYTHONIOENCODING'] = 'utf-8'
|
|
|
|
# Set LightRAG configuration for LLM binding and model
|
|
os.environ['LLM_BINDING'] = 'openai'
|
|
os.environ['LLM_MODEL'] = 'deepseek-chat'
|
|
|
|
# Set Snowflake Arctic Embed configuration (1024 dimensions)
|
|
os.environ['EMBEDDING_MODEL'] = 'snowflake-arctic-embed:latest'
|
|
os.environ['EMBEDDING_DIM'] = '1024'
|
|
|
|
# Database environment variables
|
|
os.environ['REDIS_URI'] = 'redis://localhost:6379'
|
|
os.environ['NEO4J_URI'] = 'bolt://localhost:7687'
|
|
os.environ['NEO4J_USERNAME'] = 'neo4j'
|
|
os.environ['NEO4J_PASSWORD'] = 'jleu1212'
|
|
os.environ['QDRANT_URI'] = 'http://localhost:6333/'
|
|
os.environ['POSTGRES_URI'] = 'postgresql://jleu3482:jleu1212@localhost:5432/rag_anything'
|
|
|
|
print("🚀 Starting LightRAG Server with Fixed OCR Processor")
|
|
print("=" * 50)
|
|
print("✅ CUDA 11.8 configured for GPU acceleration")
|
|
print("✅ Updated OCR processor with type conversion fixes")
|
|
print("✅ Using local source code for document processing")
|
|
print("🌐 Server will be available at: http://localhost:3015")
|
|
print("=" * 50)
|
|
|
|
# Change to LightRAG-main directory to ensure local code is used
|
|
os.chdir('LightRAG-main')
|
|
|
|
# Start the server using Python module (ensures local code is used)
|
|
cmd = [
|
|
sys.executable, '-m', 'lightrag.api.lightrag_server',
|
|
'--port', '3015',
|
|
'--working-dir', 'rag_storage',
|
|
'--input-dir', '../inputs',
|
|
'--key', 'jleu1212',
|
|
'--auto-scan-at-startup',
|
|
'--llm-binding', 'openai',
|
|
'--embedding-binding', 'ollama',
|
|
'--rerank-binding', 'null'
|
|
]
|
|
|
|
try:
|
|
print("Starting server with command:")
|
|
print(' '.join(cmd))
|
|
print("\nServer logs:")
|
|
subprocess.run(cmd)
|
|
except KeyboardInterrupt:
|
|
print("\nServer stopped by user")
|
|
except Exception as e:
|
|
print(f"Error starting server: {e}") |