Jina rerank working

This commit is contained in:
2026-01-13 09:51:35 +08:00
parent 370fe6368a
commit 9745ca2476
23 changed files with 1967 additions and 6 deletions

View File

@@ -290,6 +290,99 @@ async def ali_rerank(
)
async def ollama_rerank(
    query: str,
    documents: List[str],
    top_n: Optional[int] = None,
    api_key: Optional[str] = None,
    model: str = "jina-reranker-v2:latest",
    base_url: str = "http://localhost:11434",
    extra_body: Optional[Dict[str, Any]] = None,
) -> List[Dict[str, Any]]:
    """
    Rerank documents using Ollama with Jina rerank models.

    This function uses Ollama's embedding API to get embeddings for the query
    and documents, then calculates cosine similarity for reranking.

    Args:
        query: The search query
        documents: List of strings to rerank
        top_n: Number of top results to return (all results if None or <= 0)
        api_key: API key (not used for Ollama, kept for compatibility)
        model: Ollama model name for reranking
        base_url: Ollama server URL
        extra_body: Additional options forwarded to the Ollama embed call

    Returns:
        List of dicts of the form {"index": int, "relevance_score": float},
        sorted by relevance_score descending. Scores are cosine similarity
        mapped from [-1, 1] into [0, 1] (higher is better). Returns [] for
        empty input or on an embedding-count mismatch.

    Raises:
        Exception: re-raises any error from the underlying embed call.
    """
    import numpy as np
    from lightrag.llm.ollama import ollama_embed

    # Nothing to rank.
    if not documents:
        return []

    # One batched embed call: query first, then every document.
    all_texts = [query] + documents
    try:
        embeddings = await ollama_embed(
            texts=all_texts,
            embed_model=model,
            host=base_url,
            api_key=api_key,
            options=extra_body or {},
        )
        if len(embeddings) != len(all_texts):
            logger.error(
                "Embedding count mismatch: expected %d, got %d",
                len(all_texts),
                len(embeddings),
            )
            return []

        # First row is the query embedding; the rest belong to the documents.
        # NOTE(review): assumes all embeddings share one dimension — true for a
        # single Ollama model, which makes the 2-D stack below valid.
        query_vec = np.asarray(embeddings[0], dtype=float)
        doc_matrix = np.asarray(embeddings[1:], dtype=float)

        # Vectorized cosine similarity. The query norm is computed once
        # (it was loop-invariant); zero-norm vectors get similarity 0.0
        # instead of a division-by-zero NaN.
        query_norm = np.linalg.norm(query_vec)
        doc_norms = np.linalg.norm(doc_matrix, axis=1)
        denom = query_norm * doc_norms
        with np.errstate(divide="ignore", invalid="ignore"):
            sims = np.where(denom > 0, doc_matrix @ query_vec / denom, 0.0)

        # Map cosine similarity [-1, 1] -> relevance score [0, 1].
        scores = (sims + 1.0) / 2.0

        # Stable descending sort: ties keep original document order,
        # matching the previous list.sort() behavior.
        order = sorted(range(len(documents)), key=lambda i: scores[i], reverse=True)
        if top_n is not None and top_n > 0:
            order = order[:top_n]

        results = [
            {"index": idx, "relevance_score": float(scores[idx])} for idx in order
        ]
        logger.debug("Ollama rerank completed: %d results", len(results))
        return results
    except Exception as e:
        logger.error("Error in ollama_rerank: %s", e)
        raise
"""Please run this test as a module:
python -m lightrag.rerank
"""