赞
踩
# RAG demo (default HuggingFaceEmbeddings model): time one embedding call,
# load a previously saved FAISS index, and answer a question about Android
# through a retrieval-augmented chain (retriever -> prompt -> Tongyi LLM).
import os
import time

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.document_loaders import UnstructuredURLLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import Tongyi
from langchain_community.vectorstores import FAISS

# SECURITY: this API key is committed to source — rotate it and inject it via
# the environment. setdefault keeps the script runnable as before while
# letting an externally supplied key take precedence.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-cc1c8314fdbd43ceaf26ec1824d5dd3b")

llm = Tongyi()
# No model_name given: falls back to the library's default embedding model.
embeddings = HuggingFaceEmbeddings()

# Time a single query embedding. perf_counter is monotonic, which is the
# correct clock for elapsed-time measurement (time.time can jump backwards).
start_time = time.perf_counter()
text = "This is a test document."
query_result = embeddings.embed_query(text)
execution_time = time.perf_counter() - start_time
print(f"函数执行时间: {execution_time} 秒")
print(query_result[:3])

urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)",
]
loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)

# First run only: build the index from the documents and persist it.
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index2")

# Time the load-index + retrieval + generation path.
start_time = time.perf_counter()

# Subsequent runs: load the persisted index from disk.
# NOTE(review): FAISS.load_local unpickles data — only load indexes you
# created yourself; recent langchain releases additionally require
# allow_dangerous_deserialization=True for this call.
vectorstore = FAISS.load_local("faiss_index2", embeddings)
retriever = vectorstore.as_retriever()

template = """Answer the question based on the context below. If the question cannot be answered using the information provided answer with "I don't know"

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()

# Fan the input out: retrieved documents become {context}, the raw question
# passes through unchanged as {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser

print(chain.invoke("what is android"))

execution_time = time.perf_counter() - start_time
print(f"函数执行时间: {execution_time} 秒")
上面是使用的默认的模型,以下指定使用 all-MiniLM-L6-v2:
# RAG demo (explicit all-MiniLM-L6-v2 embedding model): time one embedding
# call, build and persist a FAISS index from a Wikipedia page, and answer a
# question about Android through a retrieval-augmented chain.
import os
import time

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.document_loaders import UnstructuredURLLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import Tongyi
from langchain_community.vectorstores import FAISS

# SECURITY: this API key is committed to source — rotate it and inject it via
# the environment. setdefault keeps the script runnable as before while
# letting an externally supplied key take precedence.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-cc1c8314fdbd43ceaf26ec1824d5dd3b")

llm = Tongyi()

# Explicitly pin the sentence-transformers model instead of the default.
model_name = "all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(
    model_name=model_name,
)

# Time a single query embedding. perf_counter is monotonic, which is the
# correct clock for elapsed-time measurement (time.time can jump backwards).
start_time = time.perf_counter()
text = "This is a test document."
query_result = embeddings.embed_query(text)
execution_time = time.perf_counter() - start_time
print(f"函数执行时间: {execution_time} 秒")
print(query_result[:3])

urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)",
]
loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)

# Time the build-index + retrieval + generation path.
start_time = time.perf_counter()

# First run: embed the documents, build the index, and persist it so later
# runs can load it instead of re-embedding everything.
vectorstore = FAISS.from_documents(documents, embeddings)
vectorstore.save_local("faiss_index2")

# Subsequent runs: load the persisted index instead of rebuilding.
# vectorstore = FAISS.load_local("faiss_index2", embeddings)
retriever = vectorstore.as_retriever()

template = """Answer the question based on the context below. If the question cannot be answered using the information provided answer with "I don't know"

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()

# Fan the input out: retrieved documents become {context}, the raw question
# passes through unchanged as {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser

print(chain.invoke("what is android"))

execution_time = time.perf_counter() - start_time
print(f"函数执行时间: {execution_time} 秒")
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。