# retrieve.py
# Hybrid verbatim chunk retrieval: BM25 + vector search + cross-encoder, no LLM.
#
# Same hybrid retrieval as query_hybrid.py but outputs raw chunk text
# instead of LLM synthesis. Useful for inspecting what the hybrid pipeline
# retrieves.
#
# Each chunk is annotated with its source (vector, BM25, or both) so you can
# see which retriever nominated it.
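#
# Usage:
#   python retrieve.py "your query text"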
#
# E.M.F. February 2026

# Environment vars must be set before importing Hugging Face / transformers
# libraries, because huggingface_hub.constants evaluates HF_HUB_OFFLINE
# at import time.
import os
import sys
import textwrap

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["SENTENCE_TRANSFORMERS_HOME"] = "./models"
os.environ["HF_HUB_OFFLINE"] = "1"

from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    Settings,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.retrievers.bm25 import BM25Retriever
#
# Globals
#
# Embedding model (must match build_store.py)
EMBED_MODEL = HuggingFaceEmbedding(
    model_name="BAAI/bge-large-en-v1.5",
    cache_folder="./models",
    local_files_only=True,
)
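# bge-large-en-v1.5 produces 1024-dimensional embeddings; query vectors are
# only comparable to stored vectors produced by the same model.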
# Cross-encoder model for re-ranking (cached in ./models/)
RERANK_MODEL = "cross-encoder/ms-marco-MiniLM-L-12-v2"
RERANK_TOP_N = 15
# Retrieval parameters
VECTOR_TOP_K = 20
BM25_TOP_K = 20
# Output formatting
WRAP_WIDTH = 80


def main():
    # No LLM needed -- set embed model only
    Settings.embed_model = EMBED_MODEL

    # Load persisted vector store
    storage_context = StorageContext.from_defaults(persist_dir="./store")
    index = load_index_from_storage(storage_context)
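    # (./store is the persist_dir written by build_store.py; loading fails
    # with an error if no index has been persisted there yet.)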

    # --- Retrievers ---
    vector_retriever = index.as_retriever(similarity_top_k=VECTOR_TOP_K)
    bm25_retriever = BM25Retriever.from_defaults(
        index=index,
        similarity_top_k=BM25_TOP_K,
    )
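    # BM25Retriever.from_defaults(index=...) builds its lexical index over the
    # nodes in the loaded index's docstore, so both retrievers search the same
    # set of chunks -- one by term overlap, one by embedding similarity.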

    # Cross-encoder re-ranker
    reranker = SentenceTransformerRerank(
        model=RERANK_MODEL,
        top_n=RERANK_TOP_N,
    )
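    # (A cross-encoder scores each (query, chunk) pair jointly, which is more
    # accurate than bi-encoder similarity but too slow to run over the whole
    # corpus -- so it only sees the merged candidate pool below.)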

    # Query from command line
    if len(sys.argv) < 2:
        print("Usage: python retrieve.py QUERY_TEXT")
        sys.exit(1)
    q = " ".join(sys.argv[1:])

    # Retrieve from both sources
    vector_nodes = vector_retriever.retrieve(q)
    bm25_nodes = bm25_retriever.retrieve(q)

    # Track which retriever found each node
    vector_ids = {n.node.node_id for n in vector_nodes}
    bm25_ids = {n.node.node_id for n in bm25_nodes}

    # Merge and deduplicate by node ID
    seen_ids = set()
    merged = []
    for node in vector_nodes + bm25_nodes:
        node_id = node.node.node_id
        if node_id not in seen_ids:
            seen_ids.add(node_id)
            merged.append(node)
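    # Vector candidates come first in the concatenation, so a chunk found by
    # both retrievers keeps its vector NodeWithScore; the re-ranker below
    # re-scores every candidate, so merge order does not affect final ranking.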

    # Re-rank merged candidates
    reranked = reranker.postprocess_nodes(merged, query_str=q)

    # Retrieval stats
    n_both = len(vector_ids & bm25_ids)
    n_vector_only = len(vector_ids - bm25_ids)
    n_bm25_only = len(bm25_ids - vector_ids)
    print(f"\nQuery: {q}")
    print(f"Vector: {len(vector_nodes)}, BM25: {len(bm25_nodes)}, "
          f"overlap: {n_both}, merged: {len(merged)}, re-ranked to: {len(reranked)}")
    print(f"  vector-only: {n_vector_only}, bm25-only: {n_bm25_only}, both: {n_both}\n")

    # Output re-ranked chunks with source annotation
    for i, node in enumerate(reranked, 1):
        meta = getattr(node, "metadata", None) or node.node.metadata
        score = getattr(node, "score", None)
        score_str = f"{score:.3f}" if score is not None else "n/a"
        file_name = meta.get("file_name", "unknown")
        text = node.get_content()
        node_id = node.node.node_id

        # Annotate source
        in_vector = node_id in vector_ids
        in_bm25 = node_id in bm25_ids
        if in_vector and in_bm25:
            source = "vector+bm25"
        elif in_bm25:
            source = "bm25-only"
        else:
            source = "vector-only"

        print("=" * WRAP_WIDTH)
        print(f"=== [{i}] {file_name} (score: {score_str}) [{source}]")
        print("=" * WRAP_WIDTH)
        for line in text.splitlines():
            if line.strip():
                print(textwrap.fill(line, width=WRAP_WIDTH))
            else:
                print()
        print()


if __name__ == "__main__":
    main()