赞
踩
Reference:
https://python.langchain.com/docs/integrations/vectorstores/chroma
"""RAG question-answering script over a local PDF using LangChain + Chroma.

Pipeline: load a PDF, embed its chunks with Jina embeddings, store/load them
in a persistent Chroma vector store, then answer a question with a ChatGLM
LLM restricted to the retrieved context (LCEL chain).
"""

import ChatGLM  # project-local wrapper exposing ChatGLM_LLM()
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import JinaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

# Single LLM instance (the original script constructed it twice; the first
# instance was dead weight — one construction is enough).
llm = ChatGLM.ChatGLM_LLM()

# Load the source document and split it into page-level chunks.
# loader = PyPDFLoader("唐诗三百首.pdf")
loader = PyPDFLoader("西游记.pdf")
documents = loader.load_and_split()

# SECURITY: API key is hardcoded in source. Move it to an environment
# variable (e.g. os.environ["JINA_API_KEY"]) and rotate this key — it is
# exposed in version control / on the web.
embeddings = JinaEmbeddings(
    jina_api_key="jina_c5d02a61c97d4d79b88234362726e94aVLMTvF38wvrElYqpGYSxFtC5Ifhj",
    model_name="jina-embeddings-v2-base-zh",
)

# First run: build and persist the index.
# vectorstore = Chroma.from_documents(documents, embeddings, persist_directory="./chroma_db")
# Subsequent runs: load the persisted index from disk.
vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)
retriever = vectorstore.as_retriever()

# Prompt constrains the model to answer only from retrieved context.
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()

# Fan the input question into the retriever (context) and pass it through
# unchanged (question), then prompt -> LLM -> plain-string output.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser

print(chain.invoke("第二十二回讲了什么"))
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。