"""Launch a LightRAG API server configured for DeepSeek chat.

Embedding and reranking are served by a local Ollama instance; storage
back-ends (Redis, Neo4j, Qdrant, Postgres) are all local services.

Running this script sets the required environment variables, then starts
``lightrag.api.lightrag_server`` as a subprocess and exits with the
server process's return code.
"""
import os
import subprocess
import sys

# --- LLM provider configuration (DeepSeek via its OpenAI-compatible API) ---
# NOTE(security): API keys and passwords are hard-coded below; move them to a
# .env file or the external environment before sharing or deploying this file.
os.environ['OPENAI_API_KEY'] = 'sk-55f6e57f1d834b0e93ceaf98cc2cb715'
os.environ['DEEPSEEK_API_KEY'] = 'sk-55f6e57f1d834b0e93ceaf98cc2cb715'
os.environ['PYTHONIOENCODING'] = 'utf-8'
os.environ['OLLAMA_EMBEDDING_MODEL'] = 'snowflake-arctic-embed:latest'
os.environ['OLLAMA_RERANKER_MODEL'] = 'jina-reranker-v2:latest'  # updated to v2 model
os.environ['OPENAI_API_MODEL'] = 'deepseek-chat'
os.environ['OPENAI_API_BASE'] = 'https://api.deepseek.com/v1'
os.environ['LLM_BINDING_HOST'] = 'https://api.deepseek.com/v1'

# Rerank runs on the local Ollama server, so no API key is needed.
os.environ['RERANK_BINDING_HOST'] = 'http://localhost:11434'
os.environ['RERANK_BINDING_API_KEY'] = ''

# --- Storage back-end connection strings (all local services) ---
os.environ['REDIS_URI'] = 'redis://localhost:6379'
os.environ['NEO4J_URI'] = 'bolt://localhost:7687'
os.environ['NEO4J_USERNAME'] = 'neo4j'
os.environ['NEO4J_PASSWORD'] = 'jleu1212'
os.environ['QDRANT_URI'] = 'http://localhost:6333/'
os.environ['POSTGRES_URI'] = 'postgresql://jleu3482:jleu1212@localhost:5432/rag_anything'

# Server command line. Entity extraction is disabled by setting
# --summary-max-tokens to 0, and the timeout is raised to 600 s to avoid
# upstream (nginx) 504 errors on slow LLM calls.
# sys.executable guarantees the server runs under the same interpreter
# (and virtualenv) as this launcher, instead of whatever 'python' resolves to.
cmd = [
    sys.executable, '-m', 'lightrag.api.lightrag_server',
    '--port', '3015',
    '--working-dir', 'rag_storage',
    '--input-dir', 'inputs',
    '--key', 'jleu1212',
    '--auto-scan-at-startup',
    '--llm-binding', 'openai',
    '--embedding-binding', 'ollama',
    '--rerank-binding', 'ollama',  # local Ollama rerank instead of Jina
    '--summary-max-tokens', '0',
    '--timeout', '600',
]

if __name__ == '__main__':
    # Guarded so importing this module only configures the environment.
    # Propagate the server's exit status so supervisors/CI can detect
    # failures (the original always exited 0).
    sys.exit(subprocess.run(cmd).returncode)