from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import os

# The original set PYTORCH_CUDA_ALLOC_CONF twice, so the second assignment overwrote the first.
# The allocator options are comma-separated, so both can be passed in one assignment.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:4000,expandable_segments:True"

# Load the documents to be indexed.
documents = SimpleDirectoryReader("./data/paul_graham").load_data()

# Use a locally downloaded bge-large-zh-v1.5 model as the embedding model.
Settings.embed_model = HuggingFaceEmbedding(
    model_name="/home/leicq/Documents/LLM_models/bge-large-zh-v1.5"
)

# Build an in-memory vector index over the documents.
index = VectorStoreIndex.from_documents(documents)

print("hello")
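The snippet builds the index but never uses it. As a minimal follow-up sketch (the query string and similarity_top_k value below are illustrative, not from the original post), the index can be searched through a retriever, which only relies on the embedding model configured above and does not require an LLM:

# Hypothetical usage: retrieve the top-2 most similar chunks for a query.
retriever = index.as_retriever(similarity_top_k=2)
nodes = retriever.retrieve("What did the author work on?")
for node in nodes:
    # Each result carries a similarity score and the underlying text chunk.
    print(node.score, node.node.get_content()[:100])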