# query_topk_prompt_engine_v2.py
# Run a query against a vector store
#
# This version uses an improved prompt that is more flexible and query-adaptive.
# Based on query_topk_prompt_engine.py
#
# Implements a custom prompt and queries the vector store built by build_exp.py
# with BAAI/bge-large-en-v1.5
#
# E.M.F. January 2026
from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    Settings,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.core.prompts import PromptTemplate
import os
import sys
#
# Globals
#
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Embedding model used in vector store (this should match the one in build_exp.py or equivalent)
# embed_model = HuggingFaceEmbedding(model_name="all-mpnet-base-v2")
embed_model = HuggingFaceEmbedding(
    cache_folder="./models",
    model_name="BAAI/bge-large-en-v1.5",
    local_files_only=True,
)
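# Optional sanity check (a sketch, not part of the original script): bge-large-en-v1.5
# produces 1024-dimensional vectors, so a quick probe confirms the intended model loaded:
#   assert len(embed_model.get_text_embedding("probe")) == 1024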
# LLM model to use in query transform and generation
# command-r7b generates about as quickly as llama3.1:8B, but provides results that stick better
# to the provided context
llm="command-r7b"
# Other models tried:
#llm="llama3.1:8B"
#llm="deepseek-r1:8B"
#llm="gemma3:1b"
#
# Custom prompt for the query engine - Version 2 (improved)
#
# This prompt is more flexible and query-adaptive than v1:
# - Doesn't force artificial structure (exactly 10 files, mandatory theme)
# - Works for factual questions, exploratory queries, and comparisons
# - Emphasizes precision with explicit citations
# - Allows natural synthesis across sources
# - Honest about limitations when context is insufficient
#
PROMPT = PromptTemplate(
    """You are a precise research assistant analyzing excerpts from a document collection.

CONTEXT (ranked by relevance):
{context_str}

QUERY:
{query_str}

Instructions:
- Answer ONLY using information explicitly present in the CONTEXT above
- Prioritize higher-ranked excerpts but don't ignore lower ones if they contain unique relevant information
- Be specific: cite file names and quote/paraphrase key passages when relevant
- If the context doesn't contain enough information to answer fully, say so
- Synthesize information across multiple sources when appropriate

Your response should:
1. Directly answer the query using the context
2. Reference specific files and their content (e.g., "In <filename>, ...")
3. List all relevant source files at the end with brief relevance notes

If you find relevant information, organize it clearly. If the context is insufficient, explain what's missing."""
)
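# At query time the engine fills the two placeholders with the retrieved excerpts
# and the user's question; roughly equivalent to (illustrative values only):
#   filled = PROMPT.format(context_str="<ranked excerpts>", query_str="<question>")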
#
# Main program routine
#
def main():
    # Use a local model to generate -- in this case via Ollama
    Settings.llm = Ollama(
        model=llm,
        request_timeout=360.0,
        context_window=8000,
    )
    # Load embedding model (must match the one used to build the vector store)
    Settings.embed_model = embed_model
    # Load persisted vector store + metadata
    storage_context = StorageContext.from_defaults(persist_dir="./storage_exp")
    index = load_index_from_storage(storage_context)
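    # Note: persist_dir must match wherever build_exp.py persisted the index; if
    # the directory is absent this raises FileNotFoundError, so a friendlier
    # guard (optional sketch) would be:
    #   if not os.path.isdir("./storage_exp"):
    #       sys.exit("No index found -- run build_exp.py first")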
    # Build regular query engine with custom prompt
    query_engine = index.as_query_engine(
        similarity_top_k=15,          # pull wide
        # response_mode="compact",    # concise synthesis
        text_qa_template=PROMPT,      # custom prompt (v2)
        # node_postprocessors=[
        #     SimilarityPostprocessor(similarity_cutoff=0.75)  # keep strong hits; makes result count flexible
        # ],
    )
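    # To enable the commented-out similarity cutoff above, the postprocessor also
    # needs importing (it lives under llama_index.core.postprocessor in recent releases):
    #   from llama_index.core.postprocessor import SimilarityPostprocessor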
    # Query text comes from the command line
    if len(sys.argv) < 2:
        print(f"Usage: python {sys.argv[0]} QUERY_TEXT")
        sys.exit(1)
    q = " ".join(sys.argv[1:])
    # Generate the response by querying the engine
    # This performs the similarity search and then applies the prompt
    response = query_engine.query(q)
    # Print the query response and source documents
    print("\nResponse:\n")
    print(response.response)
    print("\nSource documents:")
    for node in response.source_nodes:
        meta = getattr(node, "metadata", None) or node.node.metadata
        score = getattr(node, "score", None)
        score_str = f"{score:.3f}" if score is not None else "n/a"
        print(f"{meta.get('file_name')} {meta.get('file_path')} {score_str}")


if __name__ == "__main__":
    main()
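
# Example invocation (hypothetical query text):
#   python query_topk_prompt_engine_v2.py "summarize what I wrote about topic X"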