Vector search with cross-encoder re-ranking, hybrid BM25+vector retrieval, incremental index updates, and multiple LLM backends (Ollama local, OpenAI API).
# query_topk_prompt.py
#
# Run a query on a vector store.
#
# This version derives from query_rewrite_hyde.py, removing HyDE and using a custom prompt.
# It implements the prompt and uses the build_exp.py vector store with BAAI/bge-large-en-v1.5.
# Based on query_exp.py -> query_topk.py -> query_rewrite_hyde.py.
# The results are as good as with HyDE.
#
# E.M.F. August 2025

from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    Settings,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.core.prompts import PromptTemplate
import os

#
# Globals
#
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Embedding model used in the vector store (this should match the one in build_exp.py or equivalent)
# embed_model = HuggingFaceEmbedding(model_name="all-mpnet-base-v2")
embed_model = HuggingFaceEmbedding(cache_folder="./models", model_name="BAAI/bge-large-en-v1.5")

# LLM model to use in query transform and generation.
# command-r7b generates about as quickly as llama3.1:8B, but provides results that stick
# better to the provided context.
llm = "command-r7b"

# Other models tried:
# llm = "llama3.1:8B"
# llm = "deepseek-r1:8B"
# llm = "gemma3:1b"

#
# Custom prompt for the query engine
#
PROMPT = PromptTemplate(
    """You are an expert research assistant. You are given top-ranked writing excerpts (CONTEXT) and a user's QUERY.

Instructions:
- Base your response *only* on the CONTEXT.
- The snippets are ordered from most to least relevant; prioritize insights from earlier (higher-ranked) snippets.
- Aim to reference *as many distinct* relevant files as possible (up to 10).
- Do not invent or generalize; refer to specific passages or facts only.
- If a passage only loosely matches, deprioritize it.

Format your answer in two parts:

1. **Summary Theme**
   Summarize the dominant theme from the relevant context in a few sentences.

2. **Matching Files**
   List up to 10 matching files. The format for each should be:
   <filename> -
   <rationale tied to content. Include date or section hints if available.>

CONTEXT:
{context_str}

QUERY:
{query_str}

Now provide the theme and list of matching files."""
)
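
# A quick sanity check of the template. PromptTemplate.format fills the
# {context_str} and {query_str} slots that the query engine populates at run
# time ("sample context" / "sample query" are placeholder strings, not real data):
# print(PROMPT.format(context_str="sample context", query_str="sample query"))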

#
# Main program routine
#

def main():
    # Use a local model to generate -- in this case using Ollama
    Settings.llm = Ollama(
        model=llm,              # First model tested
        request_timeout=360.0,
        context_window=8000,
    )
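
    # The description mentions multiple LLM backends. A minimal sketch of
    # pointing Settings at the OpenAI API instead of local Ollama (assumes the
    # llama-index-llms-openai package is installed and OPENAI_API_KEY is set;
    # "gpt-4o-mini" is an illustrative model name, not one used in this script):
    # from llama_index.llms.openai import OpenAI
    # Settings.llm = OpenAI(model="gpt-4o-mini")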

    # Load embedding model (same as used for vector store)
    Settings.embed_model = embed_model

    # Load persisted vector store + metadata
    storage_context = StorageContext.from_defaults(persist_dir="./storage_exp")
    index = load_index_from_storage(storage_context)
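
    # The description mentions incremental index updates. A hedged sketch of
    # inserting new documents into the loaded index and re-persisting it,
    # rather than rebuilding from scratch ("./new_docs" is a hypothetical path):
    # from llama_index.core import SimpleDirectoryReader
    # for doc in SimpleDirectoryReader("./new_docs").load_data():
    #     index.insert(doc)
    # index.storage_context.persist(persist_dir="./storage_exp")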

    # Build regular query engine with custom prompt
    query_engine = index.as_query_engine(
        similarity_top_k=15,        # pull wide
        # response_mode="compact",  # concise synthesis
        text_qa_template=PROMPT,    # custom prompt
        # node_postprocessors=[
        #     SimilarityPostprocessor(similarity_cutoff=0.75)  # keep strong hits; makes result count flexible
        # ],
    )
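
    # The description mentions hybrid BM25+vector retrieval and cross-encoder
    # re-ranking, neither of which is wired up here. A hedged sketch of both,
    # assuming the llama-index-retrievers-bm25 package and sentence-transformers
    # are installed (the cross-encoder model name is illustrative, not taken
    # from this script):
    # from llama_index.retrievers.bm25 import BM25Retriever
    # from llama_index.core.retrievers import QueryFusionRetriever
    # from llama_index.core.postprocessor import SentenceTransformerRerank
    # from llama_index.core.query_engine import RetrieverQueryEngine
    #
    # bm25 = BM25Retriever.from_defaults(docstore=index.docstore, similarity_top_k=15)
    # fusion = QueryFusionRetriever(
    #     [index.as_retriever(similarity_top_k=15), bm25],
    #     similarity_top_k=15,
    #     num_queries=1,  # fuse the two result lists without LLM query rewriting
    # )
    # reranker = SentenceTransformerRerank(model="cross-encoder/ms-marco-MiniLM-L-6-v2", top_n=10)
    # query_engine = RetrieverQueryEngine.from_args(
    #     fusion, node_postprocessors=[reranker], text_qa_template=PROMPT
    # )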

    # Query loop
    while True:
        q = input("\nEnter a search topic or question (or 'exit'): ").strip()
        if q.lower() in ("exit", "quit"):
            break
        print()

        # Generate the response by querying the engine.
        # This performs the similarity search and then applies the prompt.
        response = query_engine.query(q)

        # Print the query response and source documents
        print(response.response)

        print("\nSource documents:")
        for node in response.source_nodes:
            meta = getattr(node, "metadata", None) or node.node.metadata
            print(f"{meta.get('file_name')} {meta.get('file_path')} {getattr(node, 'score', None)}")

if __name__ == "__main__":
    main()