diff --git a/04-semantic-search/query_hybrid.py b/04-semantic-search/query_hybrid.py
index e32d942..d528659 100644
--- a/04-semantic-search/query_hybrid.py
+++ b/04-semantic-search/query_hybrid.py
@@ -20,7 +20,8 @@
 # at import time.
 import os
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
-os.environ["SENTENCE_TRANSFORMERS_HOME"] = "./models"
+os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.abspath("./models")
+os.environ["HF_HUB_CACHE"] = os.path.abspath("./models")
 os.environ["HF_HUB_OFFLINE"] = "1"
 
 from llama_index.core import (
@@ -41,7 +42,7 @@ import sys
 
 #
 # Embedding model (must match build_store.py)
-EMBED_MODEL = HuggingFaceEmbedding(cache_folder="./models", model_name="BAAI/bge-large-en-v1.5", local_files_only=True)
+EMBED_MODEL = HuggingFaceEmbedding(cache_folder="./models", model_name="BAAI/bge-large-en-v1.5")
 
 # LLM model for generation
 LLM_MODEL = "command-r7b"