# query_hybrid.py
# Hybrid retrieval: BM25 (sparse) + vector similarity (dense) + cross-encoder
#
# Combines two retrieval strategies to catch both exact term matches and
# semantic similarity:
# 1. Retrieve top-20 via vector similarity (bi-encoder, catches meaning)
# 2. Retrieve top-20 via BM25 (term frequency, catches exact names/dates)
# 3. Merge and deduplicate candidates by node ID
# 4. Re-rank the union with a cross-encoder -> top-15
# 5. Pass re-ranked chunks to LLM for synthesis
#
# The cross-encoder doesn't care where candidates came from -- it scores
# each (query, chunk) pair on its own merits. BM25's job is just to
# nominate candidates that vector similarity might miss.
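#
# Example invocation (the query text here is made up for illustration):
#
#   python query_hybrid.py "when did I first mention the kayak trip"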
#
# E.M.F. February 2026

# Environment vars must be set before importing huggingface/transformers
# libraries, because huggingface_hub.constants evaluates HF_HUB_OFFLINE
# at import time.
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["SENTENCE_TRANSFORMERS_HOME"] = "./models"
os.environ["HF_HUB_OFFLINE"] = "1"

from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    Settings,
    get_response_synthesizer,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.core.prompts import PromptTemplate
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.retrievers.bm25 import BM25Retriever
import sys

#
# Globals
#

# Embedding model (must match build_store.py)
EMBED_MODEL = HuggingFaceEmbedding(
    cache_folder="./models",
    model_name="BAAI/bge-large-en-v1.5",
    local_files_only=True,
)
# LLM model for generation
LLM_MODEL = "command-r7b"
# Cross-encoder model for re-ranking (cached in ./models/)
RERANK_MODEL = "cross-encoder/ms-marco-MiniLM-L-12-v2"
RERANK_TOP_N = 15
# Retrieval parameters
VECTOR_TOP_K = 20 # candidates from vector similarity
BM25_TOP_K = 20 # candidates from BM25 term matching
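# The merged candidate pool is at most VECTOR_TOP_K + BM25_TOP_K = 40 nodes
# (fewer when the two retrievers overlap); the cross-encoder then keeps the
# top RERANK_TOP_N = 15 of whatever survives the merge.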

#
# Custom prompt -- same as v3
#
PROMPT = PromptTemplate(
    """You are a precise research assistant analyzing excerpts from a personal journal collection.
Every excerpt below has been selected and ranked for relevance to the query.

CONTEXT (ranked by relevance):
{context_str}

QUERY:
{query_str}

Instructions:
- Answer ONLY using information explicitly present in the CONTEXT above
- Examine ALL provided excerpts, not just the top few -- each one was selected for relevance
- Be specific: quote or closely paraphrase key passages and cite their file names
- When multiple files touch on the query, note what each one contributes
- If the context doesn't contain enough information to answer fully, say so

Your response should:
1. Directly answer the query, drawing on as many relevant excerpts as possible
2. Reference specific files and their content (e.g., "In <filename>, ...")
3. End with a list of all files that contributed to your answer, with a brief note on each

If the context is insufficient, explain what's missing."""
)


def main():
    # Configure the LLM and embedding model for a local model served by Ollama.
    # Note: Ollama's temperature defaults to 0.8; 0.3 keeps answers more focused.
    Settings.llm = Ollama(
        model=LLM_MODEL,
        temperature=0.3,
        request_timeout=360.0,
        context_window=8000,
    )
    # Use OpenAI API:
    # from llama_index.llms.openai import OpenAI
    # Settings.llm = OpenAI(
    #     model="gpt-4o-mini",  # or "gpt-4o" for higher quality
    #     temperature=0.3,
    # )
    Settings.embed_model = EMBED_MODEL

    # Load persisted vector store
    storage_context = StorageContext.from_defaults(persist_dir="./store")
    index = load_index_from_storage(storage_context)

    # --- Retrievers ---
    # Vector retriever (dense: cosine similarity over embeddings)
    vector_retriever = index.as_retriever(similarity_top_k=VECTOR_TOP_K)

    # BM25 retriever (sparse: term frequency scoring)
    bm25_retriever = BM25Retriever.from_defaults(
        index=index,
        similarity_top_k=BM25_TOP_K,
    )
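    # Note (per recent llama-index versions): from_defaults(index=...) builds the
    # BM25 term statistics in memory from the nodes in the index's docstore, so
    # no separate sparse index is persisted.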

    # Cross-encoder re-ranker
    reranker = SentenceTransformerRerank(
        model=RERANK_MODEL,
        top_n=RERANK_TOP_N,
    )
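    # The re-ranker's weights resolve from ./models because of the
    # SENTENCE_TRANSFORMERS_HOME and HF_HUB_OFFLINE settings at the top.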

    # --- Query ---
    if len(sys.argv) < 2:
        print("Usage: python query_hybrid.py QUERY_TEXT")
        sys.exit(1)
    q = " ".join(sys.argv[1:])

    # Retrieve from both sources
    vector_nodes = vector_retriever.retrieve(q)
    bm25_nodes = bm25_retriever.retrieve(q)

    # Merge and deduplicate by node ID
    seen_ids = set()
    merged = []
    for node in vector_nodes + bm25_nodes:
        node_id = node.node.node_id
        if node_id not in seen_ids:
            seen_ids.add(node_id)
            merged.append(node)
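    # Vector hits come first in the concatenation, so a chunk found by both
    # retrievers keeps its vector score here; the order doesn't matter downstream
    # because the cross-encoder re-scores every candidate anyway.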

    # Re-rank the merged candidates with cross-encoder
    reranked = reranker.postprocess_nodes(merged, query_str=q)

    # Report retrieval stats
    vector_ids = {n.node.node_id for n in vector_nodes}
    bm25_ids = {n.node.node_id for n in bm25_nodes}
    n_vector_only = len([n for n in vector_nodes if n.node.node_id not in bm25_ids])
    n_bm25_only = len([n for n in bm25_nodes if n.node.node_id not in vector_ids])
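    # Overlap by inclusion-exclusion: |V ∩ B| = |V| + |B| - |V ∪ B|, and
    # merged is exactly the deduplicated union of the two candidate lists.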
    n_both = len(vector_nodes) + len(bm25_nodes) - len(merged)
print(f"\nQuery: {q}")
print(f"Vector: {len(vector_nodes)}, BM25: {len(bm25_nodes)}, "
f"overlap: {n_both}, merged: {len(merged)}, re-ranked to: {len(reranked)}")

    # Synthesize response with LLM
    synthesizer = get_response_synthesizer(text_qa_template=PROMPT)
    response = synthesizer.synthesize(q, nodes=reranked)

    # Output
    print("\nResponse:\n")
    print(response.response)

    print("\nSource documents:")
    for node in response.source_nodes:
        meta = getattr(node, "metadata", None) or node.node.metadata
        score = getattr(node, "score", None)
        # Guard against a missing score so the f-string format doesn't raise
        score_str = f"{score:.3f}" if score is not None else "n/a"
        print(f"{meta.get('file_name')} {meta.get('file_path')} {score_str}")


if __name__ == "__main__":
    main()