from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    Settings,
    StorageContext,
    load_index_from_storage,
)
from llama_index.core.embeddings import resolve_embed_model
from llama_index.llms.ollama import Ollama
import logging
import os.path
import sys

# verbose logging to stdout so indexing and query steps are visible
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

# bge embedding model (local Chinese BGE model)
Settings.embed_model = resolve_embed_model("local:/Users/leicq/ai_pychat/BAAI_bge-base-zh-v1.5")

# ollama as the LLM backend
Settings.llm = Ollama(model="llama2-chinese", request_timeout=30.0)

# check if storage already exists
PERSIST_DIR = "./storage"
if not os.path.exists(PERSIST_DIR):
    # load the documents and create the index
    documents = SimpleDirectoryReader("data").load_data()
    index = VectorStoreIndex.from_documents(documents)
    # store it for later
    index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
    # load the existing index
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)

# Either way we can now query the index
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
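Since the pipeline above is configured with a Chinese embedding model (BAAI_bge-base-zh-v1.5) and the llama2-chinese Ollama model, it is natural to query it in Chinese as well. The following is a minimal sketch, not part of the original example: the Chinese question text is illustrative, and it assumes the index and query_engine built above are still in scope. LlamaIndex also supports streaming responses via as_query_engine(streaming=True), shown in the second half.

# Illustrative follow-up query in Chinese (hypothetical question text);
# reuses the index and query engine built above.
response = query_engine.query("作者小时候做了什么?")
print(response)

# Streaming variant: prints tokens as they arrive instead of waiting
# for the full answer, which helps with slow local Ollama models.
streaming_engine = index.as_query_engine(streaming=True)
streaming_response = streaming_engine.query("作者小时候做了什么?")
streaming_response.print_response_stream()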