- Rename build_exp_claude.py → build_store.py
- Rename query_hybrid_bm25_v4.py → query_hybrid.py
- Rename retrieve_hybrid_raw.py → retrieve.py
- Archive query_topk_prompt_engine_v3.py (superseded by hybrid)
- Archive retrieve_raw.py (superseded by hybrid)
- Move build_clippings.py, retrieve_clippings.py → clippings_search/
- Update run_query.sh, README.md, CLAUDE.md for new names
# query_topk_prompt_engine_v3.py
# Run a query on a vector store with cross-encoder re-ranking.
#
# Based on v2. Adds a cross-encoder re-ranking step:
#   1. Retrieve top-30 chunks via vector similarity (bi-encoder, fast)
#   2. Re-rank to top-15 using a cross-encoder (slower but more accurate)
#   3. Pass the re-ranked chunks to the LLM for synthesis
#
# The cross-encoder scores each (query, chunk) pair jointly, which captures
# nuance that bi-encoder dot-product similarity misses.
#
# E.M.F. February 2026

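# Not executed here -- a minimal sketch of the scoring difference described
# above, assuming the sentence-transformers package and these two models are
# available in the local cache; query/chunk are placeholder strings:
#
#   from sentence_transformers import CrossEncoder, SentenceTransformer, util
#
#   bi = SentenceTransformer("BAAI/bge-large-en-v1.5")
#   ce = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2")
#   query, chunk = "example query", "example chunk text"
#
#   # Bi-encoder: embed query and chunk independently, then compare the vectors
#   sim = util.cos_sim(bi.encode(query), bi.encode(chunk))
#
#   # Cross-encoder: score the (query, chunk) pair in a single forward pass
#   score = ce.predict([(query, chunk)])[0]
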
# Environment vars must be set before importing huggingface/transformers
# libraries, because huggingface_hub.constants evaluates HF_HUB_OFFLINE
# at import time.
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["SENTENCE_TRANSFORMERS_HOME"] = "./models"
os.environ["HF_HUB_OFFLINE"] = "1"

import sys

from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    Settings,
)
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.core.prompts import PromptTemplate
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama

#
# Globals
#

# Embedding model used in the vector store (must match build_exp_claude.py)
EMBED_MODEL = HuggingFaceEmbedding(
    model_name="BAAI/bge-large-en-v1.5",
    cache_folder="./models",
    local_files_only=True,
)

# LLM model for generation (served by Ollama)
llm = "command-r7b"

# Cross-encoder model for re-ranking (cached in ./models/)
# RERANK_MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2"
RERANK_MODEL = "cross-encoder/ms-marco-MiniLM-L-12-v2"
# RERANK_MODEL = "cross-encoder/stsb-roberta-base"
# RERANK_MODEL = "BAAI/bge-reranker-v2-m3"

RERANK_TOP_N = 15     # keep top 15 after re-ranking
RETRIEVE_TOP_K = 30   # retrieve a wider pool for the re-ranker to work with

#
# Custom prompt for the query engine - Version 3
#
# Adapted for re-ranked context: every excerpt below has been scored for
# relevance by a cross-encoder, so even lower-ranked ones are worth examining.
# The prompt encourages the LLM to draw from all provided excerpts and to
# note what each distinct file contributes rather than collapsing onto one.
#
PROMPT = PromptTemplate(
    """You are a precise research assistant analyzing excerpts from a personal journal collection.
Every excerpt below has been selected and ranked for relevance to the query.

CONTEXT (ranked by relevance):
{context_str}

QUERY:
{query_str}

Instructions:
- Answer ONLY using information explicitly present in the CONTEXT above
- Examine ALL provided excerpts, not just the top few -- each one was selected for relevance
- Be specific: quote or closely paraphrase key passages and cite their file names
- When multiple files touch on the query, note what each one contributes
- If the context doesn't contain enough information to answer fully, say so

Your response should:
1. Directly answer the query, drawing on as many relevant excerpts as possible
2. Reference specific files and their content (e.g., "In <filename>, ...")
3. End with a list of all files that contributed to your answer, with a brief note on each

If the context is insufficient, explain what's missing."""
)

#
# Main program routine
#

def main():
    # Use a local model to generate -- in this case via Ollama
    Settings.llm = Ollama(
        model=llm,
        request_timeout=360.0,
        context_window=8000,
    )

    # Load the embedding model (the same one used to build the vector store)
    Settings.embed_model = EMBED_MODEL

    # Load the persisted vector store + metadata
    storage_context = StorageContext.from_defaults(persist_dir="./storage_exp")
    index = load_index_from_storage(storage_context)

    # Cross-encoder re-ranker
    reranker = SentenceTransformerRerank(
        model=RERANK_MODEL,
        top_n=RERANK_TOP_N,
    )

    # Build the query engine: retrieve wide (top-30), re-rank to top-15, then synthesize
    query_engine = index.as_query_engine(
        similarity_top_k=RETRIEVE_TOP_K,
        text_qa_template=PROMPT,
        node_postprocessors=[reranker],
    )

    # Read the query from the command line
    if len(sys.argv) < 2:
        print("Usage: python query_topk_prompt_engine_v3.py QUERY_TEXT")
        sys.exit(1)
    q = " ".join(sys.argv[1:])

    # Generate the response by querying the engine
    response = query_engine.query(q)

    # Print the query response and the source documents it drew on
    print("\nResponse:\n")
    print(response.response)

    print("\nSource documents:")
    for node in response.source_nodes:
        meta = getattr(node, "metadata", None) or node.node.metadata
        score = getattr(node, "score", None)
        score_str = f"{score:.3f}" if score is not None else "n/a"
        print(f"{meta.get('file_name')} {meta.get('file_path')} {score_str}")


if __name__ == "__main__":
    main()
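
# Example usage (a sketch, assuming the vector store has already been built
# into ./storage_exp and the models are cached under ./models/):
#
#   python query_topk_prompt_engine_v3.py "your query text here"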