赞
踩
参考: https://www.pinecone.io/learn/series/langchain/langchain-expression-language/
以下使用了3399.pdf, Rockchip RK3399 TRM Part1
"""RAG pipeline over 3399.pdf (Rockchip RK3399 TRM Part1) using LangChain LCEL.

Loads the PDF, embeds its pages with Jina embeddings into an in-memory Chroma
vector store, then answers a question using ONLY the retrieved context.

Embedding references:
  https://jina.ai/embeddings/
  https://python.langchain.com/docs/integrations/text_embedding/jina
LCEL cookbook: https://python.langchain.com/cookbook
"""
import os

import ChatGLM  # project-local wrapper exposing ChatGLM_LLM()
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import JinaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

# Local LLM instance (defined in the project's ChatGLM module).
llm = ChatGLM.ChatGLM_LLM()

# Load the PDF and split it into per-page Document chunks.
loader = PyPDFLoader("3399.pdf")
documents = loader.load_and_split()

# SECURITY: never commit API keys to source control. Prefer the environment
# variable; the original literal is kept only as a backward-compatible
# fallback and should be revoked/rotated.
embeddings = JinaEmbeddings(
    jina_api_key=os.environ.get(
        "JINA_API_KEY",
        "jina_fa2c341a2f634f1381f7cfec767150caSconYmQA2XRAcVKfZ7-Zboaqeydu",
    ),
    model_name="jina-embeddings-v2-base-en",
)

# Index the documents and expose the store as a retriever.
vectorstore = Chroma.from_documents(documents, embeddings)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()

# Fan-out step: run retrieval on the query while passing the raw question
# through unchanged, so the prompt receives both "context" and "question".
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)

# LCEL pipeline: retrieve -> format prompt -> LLM -> plain string.
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("eFuse Function Description"))
更新之后的版本:
"""Updated RAG pipeline over 西游记.pdf with a persistent Chroma store.

Differences from the first version: Chinese Jina embedding model, the vector
store is persisted to ./chroma_db and reloaded on subsequent runs, and the
prompt instructs the model to answer "I do not know" when the context is
insufficient.
"""
import os

import ChatGLM  # project-local wrapper exposing ChatGLM_LLM()
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import JinaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

# Local LLM instance (defined in the project's ChatGLM module).
llm = ChatGLM.ChatGLM_LLM()

# NOTE(review): loading/splitting the PDF is only needed on the first run
# (when Chroma.from_documents builds the store); on later runs the store is
# loaded from disk and `documents` goes unused — consider guarding this.
loader = PyPDFLoader("西游记.pdf")
documents = loader.load_and_split()

# SECURITY: never commit API keys to source control. Prefer the environment
# variable; the original literal is kept only as a backward-compatible
# fallback and should be revoked/rotated.
embeddings = JinaEmbeddings(
    jina_api_key=os.environ.get(
        "JINA_API_KEY",
        "jina_c5d02a61c97d4d79b88234362726e94aVLMTvF38wvrElYqpGYSxFtC5Ifhj",
    ),
    model_name="jina-embeddings-v2-base-zh",
)

# First run: build and persist the store locally.
# vectorstore = Chroma.from_documents(documents, embeddings, persist_directory="./chroma_db")
# Subsequent runs: load the persisted store from disk.
vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context,if can not ,please just say: I do not know, please think step by step:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()

# Fan-out step: retrieve context for the query while passing the raw
# question through unchanged.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)

# LCEL pipeline: retrieve -> format prompt -> LLM -> plain string.
chain = setup_and_retrieval | prompt | llm | output_parser

# print(chain.invoke("介绍下红楼梦"))
print(chain.invoke("第二十二回讲了什么"))
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。