# cache_model.py
#
# Pre-download the embedding model so build.py doesn't have to fetch it.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding


def cache_embedding_model(
    cache_folder: str = "./models",
    model_name: str = "BAAI/bge-large-en-v1.5",
) -> HuggingFaceEmbedding:
    """Download (if necessary) and locally cache a HuggingFace embedding model.

    Instantiating HuggingFaceEmbedding triggers the weight download into
    ``cache_folder``, so a later build step can load the model offline.

    Args:
        cache_folder: Directory where the model weights are cached.
        model_name: HuggingFace model identifier to fetch.

    Returns:
        The instantiated embedding model (weights now present on disk).
    """
    embed_model = HuggingFaceEmbedding(
        cache_folder=cache_folder,
        model_name=model_name,
    )
    print(f"Embedding model cached in {cache_folder}")
    return embed_model


if __name__ == "__main__":
    # Guarded entry point: importing this module must NOT kick off a
    # multi-gigabyte model download as a side effect.
    cache_embedding_model()