Load an arXiv paper and have the model summarize its first 2,000 characters.
This example uses the Stuff strategy: the long text is split by character count into N chunks, and those chunks are simply concatenated back into one large block that is sent to the model in a single prompt.
Stuff does not cope well with very large inputs and does not distinguish important documents from unimportant ones, so it is best suited to scenarios with only a small amount of text.
```python
import os
from dotenv import load_dotenv
from langchain_community.llms import Tongyi

load_dotenv('key.env')  # load the env file with the credentials
DASHSCOPE_API_KEY = os.environ["DASHSCOPE_API_KEY"]  # Tongyi reads this environment variable
model = Tongyi(temperature=1)

from langchain_core.prompts import PromptTemplate, format_document
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import ArxivLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Load the arXiv paper "ReAct: Synergizing Reasoning and Acting in Language Models"
loader = ArxivLoader(
    query="2210.03629",
    load_max_docs=1  # load only the first matching document
)
docs = loader.load()
print(docs[0].metadata)

# Split the text into 500-character chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=0  # no overlap between chunks
)
chunks = text_splitter.split_documents(docs)

# Build the Stuff-style summarization chain (chunks are simply concatenated)
doc_prompt = PromptTemplate.from_template("{page_content}")
chain = (
    {
        "content": lambda docs: "\n\n".join(
            format_document(doc, doc_prompt) for doc in docs
        )
    }
    # The prompt asks for a Chinese summary within 50 characters, skipping author introductions
    | PromptTemplate.from_template("用中文总结以下内容,不需要人物介绍,字数控制在 50 字以内:\n\n{content}")
    | model
    | StrOutputParser()
)

# The paper is long, so only the first ~2,000 characters (4 chunks of 500) are summarized
res = chain.invoke(chunks[:4])
print(res)
```
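For comparison, LangChain also offers a prebuilt summarization helper that implements the same Stuff behavior. The sketch below is not part of the original example: it assumes the legacy `load_summarize_chain` API is still available in the installed LangChain version, reuses `model` and `chunks` from the code above, and relies on the helper's default English prompt rather than the custom Chinese one.

```python
# Minimal sketch (assumption: the legacy load_summarize_chain helper exists in
# your LangChain version). It implements the same Stuff strategy: all chunks are
# concatenated into a single prompt and summarized in one call.
from langchain.chains.summarize import load_summarize_chain

stuff_chain = load_summarize_chain(model, chain_type="stuff")
print(stuff_chain.run(chunks[:4]))  # same four chunks as above, default English prompt
```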
The MapReduce strategy produces an answer for each chunk first (map) and then merges all of those answers into one (reduce).
It suits very large collections, even thousands of documents, because the map step runs in parallel; on the other hand, map and reduce need different prompt templates and the model is called many times, so per-call efficiency is low.
```python
import os
from dotenv import load_dotenv
from langchain_community.llms import Tongyi

load_dotenv('key.env')  # load the env file with the credentials
DASHSCOPE_API_KEY = os.environ["DASHSCOPE_API_KEY"]  # Tongyi reads this environment variable
model = Tongyi(temperature=1)

from functools import partial
from langchain_core.prompts import PromptTemplate, format_document
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import ArxivLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Load the arXiv paper "ReAct: Synergizing Reasoning and Acting in Language Models"
loader = ArxivLoader(query="2210.03629", load_max_docs=1)
docs = loader.load()

# Split the text into 500-character chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50  # allow 50 characters of overlap between chunks
)
chunks = text_splitter.split_documents(docs)

# Helper: turn a Document into a plain string
document_prompt = PromptTemplate.from_template("{page_content}")
partial_format_document = partial(format_document, prompt=document_prompt)

# Map chain: summarize each chunk individually
map_chain = (
    {"context": partial_format_document}
    | PromptTemplate.from_template("Summarize this content:\n\n{context}")
    | model
    | StrOutputParser()
)

# Reduce chain: merge all of the per-chunk summaries
reduce_chain = (
    {"context": lambda strs: "\n\n".join(strs)}
    | PromptTemplate.from_template("Combine these summaries:\n\n{context}")
    | model
    | StrOutputParser()
)

# Compose the two into a MapReduce chain
map_reduce = map_chain.map() | reduce_chain
res = map_reduce.invoke(chunks[:4], config={"max_concurrency": 5})
print(res)
```
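The key composition step is `map_chain.map()`: calling `.map()` on a runnable returns a new runnable that accepts a list and applies the wrapped chain to every element, with parallelism capped by `max_concurrency`. The toy sketch below is not from the original code; it uses a plain `RunnableLambda` instead of the Tongyi model so it runs without an API key, purely to show the `.map()` semantics.

```python
# Minimal sketch of Runnable.map(): the mapped runnable takes a list input and
# applies the wrapped runnable to each element (here a toy doubling lambda).
from langchain_core.runnables import RunnableLambda

double = RunnableLambda(lambda x: x * 2)
print(double.map().invoke([1, 2, 3]))  # -> [2, 4, 6]
print(double.map().invoke([10, 20], config={"max_concurrency": 2}))  # -> [20, 40]
```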
The Refine strategy is also multi-round, but each round's input contains only one document plus the intermediate answer from the previous round.
Advantages:
Each round only has to generate an answer for a single document, which avoids an overly long context; the answer is reasoned out and refined step by step rather than stuffing all the information in at once; and the prompt template for each round can be customized for finer-grained control.
Disadvantages:
Document order has a large effect on the result, so intelligent ordering may be needed; total computation time is high.
```python
import os
from dotenv import load_dotenv
from langchain_community.llms import Tongyi

load_dotenv('key.env')  # load the env file with the credentials
DASHSCOPE_API_KEY = os.environ["DASHSCOPE_API_KEY"]  # Tongyi reads this environment variable
llm = Tongyi(temperature=1)

from functools import partial
from operator import itemgetter
from langchain_core.prompts import PromptTemplate, format_document
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import ArxivLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Load the arXiv paper "ReAct: Synergizing Reasoning and Acting in Language Models"
loader = ArxivLoader(query="2210.03629", load_max_docs=1)
docs = loader.load()

# Split the text into 500-character chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50
)
chunks = text_splitter.split_documents(docs)

# Helper: turn a Document into a plain string
document_prompt = PromptTemplate.from_template("{page_content}")
partial_format_document = partial(format_document, prompt=document_prompt)

# Context chain: summarize the first chunk; its output seeds the later rounds
first_prompt = PromptTemplate.from_template("Summarize this content:\n\n{context}")
context_chain = {"context": partial_format_document} | first_prompt | llm | StrOutputParser()

# Refine chain: extend the previous summary using the next chunk as context
refine_prompt = PromptTemplate.from_template(
    "Here's your first summary: {prev_response}. "
    "Now add to it based on the following context: {context}"
)
refine_chain = (
    {
        "prev_response": itemgetter("prev_response"),
        "context": lambda x: partial_format_document(x["doc"]),
    }
    | refine_prompt
    | llm
    | StrOutputParser()
)


# Run the Refine loop: feed each remaining chunk together with the running summary
def refine_loop(docs):
    summary = context_chain.invoke(docs[0])
    for doc in docs[1:]:
        summary = refine_chain.invoke({"prev_response": summary, "doc": doc})
    return summary


res = refine_loop(chunks[:4])
print(res)
```
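Since the running summary acts as an accumulator that each chunk updates, the refine loop is really a left fold. The sketch below is an illustrative rewrite, not part of the original code; it reuses `context_chain`, `refine_chain`, and `chunks` defined above and behaves the same as `refine_loop`.

```python
# Sketch: the Refine loop expressed as a left fold with functools.reduce.
# Reuses context_chain and refine_chain from above; output matches refine_loop().
from functools import reduce

def refine_fold(docs):
    first_summary = context_chain.invoke(docs[0])  # seed the summary with the first chunk
    return reduce(
        lambda summary, doc: refine_chain.invoke({"prev_response": summary, "doc": doc}),
        docs[1:],  # fold the remaining chunks into the running summary
        first_summary,
    )

print(refine_fold(chunks[:4]))
```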