Five modules covering nanoGPT, Ollama, RAG, semantic search, and neural networks. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
12 lines
314 B
Python
12 lines
314 B
Python
# cache_model.py
#
# Pre-download the embedding model so build.py doesn't have to fetch it.

from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Instantiating the wrapper is what warms the cache: if the weights are
# not already present under cache_folder, HuggingFace downloads them.
embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-large-en-v1.5",
    cache_folder="./models",
)

print("Embedding model cached in ./models")