Remove local_files_only=True from query_hybrid.py
The argument forces a strict deprecation path in newer sentence-transformers/transformers releases that ignores cache_folder. Removing it lets the script find the locally cached embedding model the same way 03-rag's query.py does.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
parent 6a03a0b9d1
commit a1f9d4d5ed

1 changed file with 3 additions and 2 deletions
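For context, a minimal sketch of the loading pattern this commit converges on: offline behavior is enforced through environment variables set before any Hugging Face import, rather than through the local_files_only kwarg. The HuggingFaceEmbedding import path is the standard llama-index one and is assumed here, since the diff below does not show it.

# Sketch only, not the full query_hybrid.py. The env vars must be set
# before transformers/sentence-transformers are imported.
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.abspath("./models")
os.environ["HF_HUB_CACHE"] = os.path.abspath("./models")
os.environ["HF_HUB_OFFLINE"] = "1"  # fail fast instead of hitting the network

# Assumed import path; the hunks below do not show this line.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# No local_files_only=True: offline enforcement comes from HF_HUB_OFFLINE,
# so cache_folder stays on the normal, non-deprecated loading path.
EMBED_MODEL = HuggingFaceEmbedding(
    model_name="BAAI/bge-large-en-v1.5",
    cache_folder="./models",
)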
query_hybrid.py
@@ -20,7 +20,8 @@
 # at import time.
 import os
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
-os.environ["SENTENCE_TRANSFORMERS_HOME"] = "./models"
+os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.abspath("./models")
+os.environ["HF_HUB_CACHE"] = os.path.abspath("./models")
 os.environ["HF_HUB_OFFLINE"] = "1"

 from llama_index.core import (
@@ -41,7 +42,7 @@ import sys
 #

 # Embedding model (must match build_store.py)
-EMBED_MODEL = HuggingFaceEmbedding(cache_folder="./models", model_name="BAAI/bge-large-en-v1.5", local_files_only=True)
+EMBED_MODEL = HuggingFaceEmbedding(cache_folder="./models", model_name="BAAI/bge-large-en-v1.5")

 # LLM model for generation
 LLM_MODEL = "command-r7b"
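To check the offline setup end to end, a hypothetical one-time prefetch script (not part of this commit) can populate ./models while online; afterwards query_hybrid.py, which sets HF_HUB_OFFLINE=1, should load the model with no network access.

# prefetch.py (hypothetical helper, not in the repo): download the embedding
# model into ./models using the hub cache layout that newer
# sentence-transformers versions read from.
import os

os.environ["HF_HUB_CACHE"] = os.path.abspath("./models")

from huggingface_hub import snapshot_download

# Fetches the full model snapshot, or reuses it if already cached.
snapshot_download(repo_id="BAAI/bge-large-en-v1.5")

If query_hybrid.py still fails offline after the prefetch, the cached layout likely does not match what the installed sentence-transformers version expects.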