Initial commit: RAG demo with build and query scripts
commit 39f1f73e2a
6 changed files with 214 additions and 0 deletions
query.py (new file, 110 lines)
@@ -0,0 +1,110 @@
# query.py
#
# Run a query on a vector store
#
# August 2025
# E. M. Furst
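#
# (Companion to build.py, which is expected to have written the persisted
# vector store to ./storage before this script is run.)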

from llama_index.core import (
    load_index_from_storage,
    StorageContext,
    Settings,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.core.prompts import PromptTemplate
import os, time

#
# Globals
#
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Embedding model used in vector store (this should match the one in build.py)
embed_model = HuggingFaceEmbedding(cache_folder="./models",
                                   model_name="BAAI/bge-large-en-v1.5")
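# (The bge-large-en-v1.5 weights are fetched from Hugging Face into ./models
# on the first run; later runs load them from that cache.)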

# LLM model to use in query transform and generation
llm = "command-r7b"

#
# Custom prompt for the query engine
#
PROMPT = PromptTemplate(
    """You are an expert research assistant. You are given top-ranked writing \
excerpts (CONTEXT) and a user's QUERY.

Instructions:
- Base your response *only* on the CONTEXT.
- The snippets are ordered from most to least relevant—prioritize insights \
from earlier (higher-ranked) snippets.
- Aim to reference *as many distinct* relevant files as possible (up to 10).
- Do not invent or generalize; refer to specific passages or facts only.
- If a passage only loosely matches, deprioritize it.

Format your answer in two parts:

1. **Summary Theme**
Summarize the dominant theme from the relevant context in a few sentences.

2. **Matching Files**
Make a list of 10 matching files. The format for each should be:
<filename> - <rationale tied to content. Include date if available.>

CONTEXT:
{context_str}

QUERY:
{query_str}

Now provide the theme and list of matching files."""
)
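
# ({context_str} and {query_str} are the standard text-QA template variables;
# the query engine substitutes the retrieved chunks and the user's question
# into them at query time.)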

#
# Main program routine
#

def main():
    # Use a local model to generate -- in this case using Ollama
    Settings.llm = Ollama(
        model=llm,
        request_timeout=360.0,
    )
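    # (Assumes an Ollama server is running locally and the model has already
    # been pulled, e.g. with `ollama pull command-r7b`; otherwise the query
    # below fails with a connection or model-not-found error.)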

    # Load embedding model (same as used for vector store)
    Settings.embed_model = embed_model

    # Load persisted vector store + metadata
    storage_context = StorageContext.from_defaults(persist_dir="./storage")
    index = load_index_from_storage(storage_context)

    # Build regular query engine with custom prompt
    query_engine = index.as_query_engine(
        similarity_top_k=15,
        text_qa_template=PROMPT,
    )
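    # (similarity_top_k=15 asks the retriever for the 15 best-matching chunks;
    # the response synthesizer fills them into {context_str} of PROMPT.)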

    # Query
    while True:
        q = input("\nEnter a search topic or question (or 'exit'): ").strip()
        if q.lower() in ("exit", "quit"):
            break
        print()

        # Generate the response by querying the engine
        start_time = time.time()
        response = query_engine.query(q)
        end_time = time.time()

        # Print the query response and source documents
        print(response.response)

        print("\nSource documents:")
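        # (Each source node carries a similarity score and the metadata stored
        # at indexing time; 'file_name' assumes build.py loaded the documents
        # with a reader that records it, such as SimpleDirectoryReader.)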
        for node in response.source_nodes:
            meta = getattr(node, "metadata", None) or node.node.metadata
            print(f" {meta.get('file_name')} {getattr(node, 'score', None)}")

        print(f"\nElapsed time: {(end_time-start_time):.1f} seconds")

if __name__ == "__main__":
    main()