
LangChain Object Relationships


This article sorts out LangChain's main concepts and the relationships among its objects.

  • Triangle arrowheads in the diagrams indicate inheritance.
  • Diamond arrowheads indicate composition (one object holds a reference to another).

LLM

The relationships among the relevant classes are as follows:

BaseLanguageModel

class BaseLanguageModel(BaseModel, ABC):
    @abstractmethod
    def generate_prompt(
        self, prompts: List[PromptValue], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Take in a list of prompt values and return an LLMResult."""

    def get_num_tokens(self, text: str) -> int:
        """Get the number of tokens present in the text."""

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Get the number of tokens in the message."""


BaseLLM

class BaseLLM(BaseLanguageModel, BaseModel, ABC):
    """LLM wrapper should take in a prompt and return a string."""

    cache: Optional[bool] = None
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether to print out response text."""
    callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)

    def generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Run the LLM on the given prompt and input."""

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the LLM."""
        starter_dict = dict(self._identifying_params)
        starter_dict["_type"] = self._llm_type
        return starter_dict

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the LLM."""


BaseOpenAI

The default model is text-davinci-003.

class BaseOpenAI(BaseLLM, BaseModel):
    """Wrapper around OpenAI large language models."""

    client: Any  #: :meta private:
    model_name: str = "text-davinci-003"
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    max_tokens: int = 256
    """The maximum number of tokens to generate in the completion.
    -1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
    top_p: float = 1
    """Total probability mass of tokens to consider at each step."""
    frequency_penalty: float = 0
    """Penalizes repeated tokens according to frequency."""
    presence_penalty: float = 0
    """Penalizes repeated tokens."""
    n: int = 1
    """How many completions to generate for each prompt."""
    best_of: int = 1
    """Generates best_of completions server-side and returns the "best"."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    batch_size: int = 20
    """Batch size to use when passing multiple documents to generate."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
    """Adjust the probability of specific tokens being generated."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""

    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""

    def get_num_tokens(self, text: str) -> int:
        """Calculate num tokens with tiktoken package."""

    def modelname_to_contextsize(self, modelname: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a model.

        text-davinci-003: 4,097 tokens
        text-curie-001: 2,048 tokens
        text-babbage-001: 2,048 tokens
        text-ada-001: 2,048 tokens
        code-davinci-002: 8,000 tokens
        code-cushman-001: 2,048 tokens

        Args:
            modelname: The modelname we want to know the context size for.

        Returns:
            The maximum context size
        """

    def max_tokens_for_prompt(self, prompt: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a prompt."""


OpenAI

class OpenAI(BaseOpenAI):
    """Generic OpenAI class that uses model name."""

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}

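A minimal usage sketch (illustrative only, and assuming OPENAI_API_KEY is set in the environment):

from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003", temperature=0.7)

# Calling a BaseLLM directly routes through generate() and returns the completion text.
print(llm("Say hello in French."))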

BaseChatModel

class BaseChatModel(BaseLanguageModel, BaseModel, ABC):
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether to print out response text."""
    callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)


ChatOpenAI

The default model is gpt-3.5-turbo, a model fine-tuned for dialogue.

class ChatOpenAI(BaseChatModel, BaseModel):
    """Wrapper around OpenAI Chat large language models."""

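Chat models take a list of messages rather than a plain string. A minimal sketch, under the same API-key assumption:

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(temperature=0)  # defaults to gpt-3.5-turbo

# Calling a BaseChatModel with a message list returns a single AIMessage.
reply = chat([
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="What is LangChain?"),
])
print(reply.content)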

Chain

The relationships among the relevant classes are as follows:

Chain

BaseModel comes from pydantic, a Python library for defining, validating, and managing data interfaces. pydantic enforces type hints at runtime and raises friendly errors when data is invalid. Reference: https://www.cnblogs.com/dyl0/articles/16896330.html

class Chain(BaseModel, ABC):
    """Base interface that all chains should implement."""

    memory: Optional[BaseMemory] = None
    callback_manager: BaseCallbackManager = Field(
        default_factory=get_callback_manager, exclude=True
    )
    verbose: bool = Field(
        default_factory=_get_verbosity
    )  # Whether to print the response text

    @property
    @abstractmethod
    def input_keys(self) -> List[str]:
        """Input keys this chain expects."""

    @property
    @abstractmethod
    def output_keys(self) -> List[str]:
        """Output keys this chain expects."""

    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Call the chain on all inputs in the list."""

    def run(self, *args: str, **kwargs: str) -> str:
        """Run the chain as text in, text out or multiple variables, text out."""

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of chain."""

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the chain."""


LLMChain

class LLMChain(Chain, BaseModel):
    """Chain to run queries against LLMs."""

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: BaseLanguageModel
    output_key: str = "text"  #: :meta private:

    def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
        """Generate LLM result from inputs."""

    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""

    def predict(self, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM."""

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> Chain:
        """Create LLMChain from LLM and template."""

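A minimal sketch tying a PromptTemplate and an LLM together (the prompt text is illustrative):

from langchain import LLMChain, OpenAI, PromptTemplate

prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

# With a single input key, run() accepts a plain string;
# predict() takes the template variables as keyword arguments.
print(chain.run("colorful socks"))
print(chain.predict(product="colorful socks"))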

ConversationChain

Inherits from LLMChain.

class ConversationChain(LLMChain, BaseModel):
    """Chain to have a conversation and load context from memory.

    Example:
        .. code-block:: python

            from langchain import ConversationChain, OpenAI
            conversation = ConversationChain(llm=OpenAI())
    """

    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    """Default memory store."""
    prompt: BasePromptTemplate = PROMPT
    """Default conversation prompt to use."""
    input_key: str = "input"  #: :meta private:
    output_key: str = "response"  #: :meta private:

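Because the default memory is a ConversationBufferMemory, later calls see the earlier turns. A minimal sketch:

from langchain import ConversationChain, OpenAI

conversation = ConversationChain(llm=OpenAI(temperature=0), verbose=True)

conversation.predict(input="Hi, my name is Bob.")
# The memory injects the first turn into the second prompt,
# so the model can recall the name.
print(conversation.predict(input="What is my name?"))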

Agent

The relationships among the relevant classes are as follows:

Agent

class Agent(BaseModel):
    """Class responsible for calling the language model and deciding the action.

    This is driven by an LLMChain. The prompt in the LLMChain MUST include
    a variable called "agent_scratchpad" where the agent can put its
    intermediary work.
    """

    llm_chain: LLMChain
    allowed_tools: Optional[List[str]] = None
    return_values: List[str] = ["output"]

    @property
    def _stop(self) -> List[str]: ...

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> Union[str, List[BaseMessage]]:
        """Construct the scratchpad that lets the agent continue its thought process."""

    def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction: ...

    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decided what to do."""

    @property
    def finish_tool_name(self) -> str:
        """Name of the tool to use to finish the chain."""
        return "Final Answer"

    @root_validator()
    def validate_prompt(cls, values: Dict) -> Dict:
        """Validate that prompt matches format."""

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLLM,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: List[Tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations."""

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the agent."""


ZeroShotAgent

class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "zero-shot-react-description"

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent."""

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLLM,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""


AgentExecutor

Inherits from Chain, so it has the run() method, and holds an Agent object as an attribute.

class AgentExecutor(Chain, BaseModel):
    """Consists of an agent using tools."""

    agent: Agent
    tools: Sequence[BaseTool]
    return_intermediate_steps: bool = False
    max_iterations: Optional[int] = 15
    early_stopping_method: str = "force"

    @classmethod
    def from_agent_and_tools(
        cls,
        agent: Agent,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        **kwargs: Any,
    ) -> AgentExecutor:
        """Create from agent and tools."""

    def save_agent(self, file_path: Union[Path, str]) -> None:
        """Save the underlying agent."""

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys."""

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key."""

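A minimal sketch wiring an agent and executor together by hand (the WordLength tool is a made-up illustration):

from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.llms import OpenAI

def word_length(word: str) -> str:
    return str(len(word))

tools = [Tool(name="WordLength", func=word_length,
              description="Returns the number of characters in a word.")]

agent = ZeroShotAgent.from_llm_and_tools(llm=OpenAI(temperature=0), tools=tools)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
executor.run("How many letters are in the word 'LangChain'?")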

initialize_agent

def initialize_agent(
    tools: Sequence[BaseTool],
    llm: BaseLLM,
    agent: Optional[str] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    agent_path: Optional[str] = None,
    agent_kwargs: Optional[dict] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Load an agent executor given tools and LLM.

    Args:
        tools: List of tools this agent has access to.
        llm: Language model to use as the agent.
        agent: A string that specified the agent type to use. Valid options are:
            `zero-shot-react-description`
            `react-docstore`
            `self-ask-with-search`
            `conversational-react-description`
            `chat-zero-shot-react-description`
            `chat-conversational-react-description`
            If None and agent_path is also None, will default to
            `zero-shot-react-description`.
        callback_manager: CallbackManager to use. Global callback manager is used if
            not provided. Defaults to None.
        agent_path: Path to serialized agent to use.
        agent_kwargs: Additional key word arguments to pass to the underlying agent
        **kwargs: Additional key word arguments passed to the agent executor

    Returns:
        An agent executor
    """

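In practice, initialize_agent is the usual entry point. A minimal sketch using the built-in llm-math tool:

from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)  # the calculator tool itself needs an LLM

agent_executor = initialize_agent(
    tools, llm, agent="zero-shot-react-description", verbose=True
)
agent_executor.run("What is 13 raised to the 0.43 power?")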

Memory

The relationships among the relevant classes are as follows:

BaseMemory

class BaseMemory(BaseModel, ABC):
    """Base interface for memory in chains."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    @abstractmethod
    def memory_variables(self) -> List[str]:
        """Input keys this memory class will load dynamically."""

    @abstractmethod
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return key-value pairs given the text input to the chain.

        If None, return all memories
        """

    @abstractmethod
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save the context of this model run to memory."""

    @abstractmethod
    def clear(self) -> None:
        """Clear memory contents."""


BaseChatMemory

class BaseChatMemory(BaseMemory, ABC):
    chat_memory: ChatMessageHistory = Field(default_factory=ChatMessageHistory)
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    return_messages: bool = False

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""

    def clear(self) -> None:
        """Clear memory contents."""


ChatMessageHistory

class ChatMessageHistory(BaseModel):
    messages: List[BaseMessage] = Field(default_factory=list)

    def add_user_message(self, message: str) -> None:
        self.messages.append(HumanMessage(content=message))

    def add_ai_message(self, message: str) -> None:
        self.messages.append(AIMessage(content=message))

    def clear(self) -> None:
        self.messages = []

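A minimal sketch (the import path may differ slightly across LangChain versions):

from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("Hello!")
history.add_ai_message("Hi there, how can I help?")

# A plain list of HumanMessage/AIMessage objects.
print(history.messages)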

ConversationBufferMemory

The buffer property returns the messages of the ChatMessageHistory held by BaseChatMemory. Similar memory classes include ConversationBufferWindowMemory, ConversationSummaryBufferMemory, and so on.

class ConversationBufferMemory(BaseChatMemory, BaseModel):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:

    @property
    def buffer(self) -> Any:
        """String buffer of memory."""

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""

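A minimal sketch of the save/load cycle:

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi"}, {"output": "Hello! How can I help?"})

# Returns {'history': 'Human: Hi\nAI: Hello! How can I help?'}
print(memory.load_memory_variables({}))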

ConversationBufferWindowMemory

class ConversationBufferWindowMemory(BaseChatMemory, BaseModel):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:
    k: int = 5

    @property
    def buffer(self) -> List[BaseMessage]:
        """String buffer of memory."""

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables."""

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""

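The k field caps how many of the most recent exchanges are kept. A minimal sketch:

from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=1)  # keep only the last exchange
memory.save_context({"input": "first question"}, {"output": "first answer"})
memory.save_context({"input": "second question"}, {"output": "second answer"})

# Only the second exchange survives the window.
print(memory.load_memory_variables({}))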

ConversationSummaryBufferMemory

class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin, BaseModel):
    """Buffer with summarizer for storing conversation memory."""

    max_token_limit: int = 2000
    moving_summary_buffer: str = ""
    memory_key: str = "history"

    @property
    def buffer(self) -> List[BaseMessage]:
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""


ConversationSummaryMemory

class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin, BaseModel):
    """Conversation summarizer to memory."""

    buffer: str = ""
    memory_key: str = "history"  #: :meta private:

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables."""

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""

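Since SummarizerMixin supplies an llm field, the summary memories need a model to write the running summary. A minimal sketch (illustrative output):

from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryMemory

memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
memory.save_context(
    {"input": "Tell me about LangChain."},
    {"output": "LangChain is a framework for building LLM applications."},
)

# 'history' now holds an LLM-written summary instead of the raw turns.
print(memory.load_memory_variables({}))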

Template

The relationships among the relevant classes are as follows:

BasePromptTemplate

class BasePromptTemplate(BaseModel, ABC):
    """Base class for all prompt templates, returning a prompt."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
    output_parser: Optional[BaseOutputParser] = None
    """How to parse the output of calling an LLM on this formatted prompt."""
    partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field(
        default_factory=dict
    )

    @abstractmethod
    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""

    @abstractmethod
    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs."""

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of prompt."""

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the prompt."""


StringPromptTemplate

class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt should expose the format method, returning a prompt."""

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))


PromptTemplate

class PromptTemplate(StringPromptTemplate, BaseModel):
    """Schema to represent a prompt for an LLM."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
    template: str
    """The prompt template."""
    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""
    validate_template: bool = True
    """Whether or not to try validating the template."""

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs."""

    @classmethod
    def from_file(
        cls, template_file: Union[str, Path], input_variables: List[str]
    ) -> PromptTemplate:
        """Load a prompt from a file."""

    @classmethod
    def from_template(cls, template: str) -> PromptTemplate:
        """Load a prompt template from a template."""

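A minimal sketch; from_template infers the input variables from the braces:

from langchain import PromptTemplate

prompt = PromptTemplate.from_template("Tell me a {adjective} joke about {subject}.")

print(prompt.input_variables)  # ['adjective', 'subject']
print(prompt.format(adjective="funny", subject="chickens"))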

ChatPromptTemplate

class ChatPromptTemplate(BasePromptTemplate, ABC):
    input_variables: List[str]
    messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]

    @classmethod
    def from_role_strings(
        cls, string_messages: List[Tuple[str, str]]
    ) -> ChatPromptTemplate: ...

    @classmethod
    def from_strings(
        cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]]
    ) -> ChatPromptTemplate: ...

    @classmethod
    def from_messages(
        cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]]
    ) -> ChatPromptTemplate: ...

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_prompt(self, **kwargs: Any) -> PromptValue: ...


BaseMessagePromptTemplate

class BaseMessagePromptTemplate(BaseModel, ABC):
    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """To messages."""

    @property
    @abstractmethod
    def input_variables(self) -> List[str]:
        """Input variables for this prompt template."""


BaseStringMessagePromptTemplate

Holds a StringPromptTemplate field.

class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    prompt: StringPromptTemplate
    additional_kwargs: dict = Field(default_factory=dict)

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> BaseMessagePromptTemplate:
        prompt = PromptTemplate.from_template(template)
        return cls(prompt=prompt, **kwargs)

    @abstractmethod
    def format(self, **kwargs: Any) -> BaseMessage:
        """To a BaseMessage."""

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        return [self.format(**kwargs)]

    @property
    def input_variables(self) -> List[str]:
        return self.prompt.input_variables


ChatMessagePromptTemplate

class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str

    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return ChatMessage(
            content=text, role=self.role, additional_kwargs=self.additional_kwargs
        )


HumanMessagePromptTemplate

class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)


AIMessagePromptTemplate

class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return AIMessage(content=text, additional_kwargs=self.additional_kwargs)


SystemMessagePromptTemplate

class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return SystemMessage(content=text, additional_kwargs=self.additional_kwargs)

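A minimal sketch assembling the message templates into a chat prompt:

from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

system = SystemMessagePromptTemplate.from_template(
    "You are a translator from {src} to {dst}."
)
human = HumanMessagePromptTemplate.from_template("{text}")

chat_prompt = ChatPromptTemplate.from_messages([system, human])
messages = chat_prompt.format_prompt(
    src="English", dst="French", text="I love programming."
).to_messages()  # [SystemMessage(...), HumanMessage(...)]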

Message

The relationships among the relevant classes are as follows:

BaseMessage

class BaseMessage(BaseModel):
    """Message object."""

    content: str
    additional_kwargs: dict = Field(default_factory=dict)

    def format_chatml(self) -> str:
        raise NotImplementedError()

    @property
    @abstractmethod
    def type(self) -> str:
        """Type of the message, used for serialization."""


HumanMessage

class HumanMessage(BaseMessage):
    """Type of message that is spoken by the human."""

    def format_chatml(self) -> str:
        return f"<|im_start|>user\n{self.content}\n<|im_end|>"

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "human"


AIMessage

class AIMessage(BaseMessage):
    """Type of message that is spoken by the AI."""

    def format_chatml(self) -> str:
        return f"<|im_start|>assistant\n{self.content}\n<|im_end|>"

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "ai"


SystemMessage

class SystemMessage(BaseMessage):
    """Type of message that is a system message."""

    def format_chatml(self) -> str:
        return f"<|im_start|>system\n{self.content}\n<|im_end|>"

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "system"


ChatMessage

class ChatMessage(BaseMessage):
    """Type of message with arbitrary speaker."""

    role: str

    def format_chatml(self) -> str:
        return f"<|im_start|>{self.role}\n{self.content}\n<|im_end|>"

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "chat"

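A minimal sketch constructing each message type:

from langchain.schema import AIMessage, ChatMessage, HumanMessage, SystemMessage

messages = [
    SystemMessage(content="You are concise."),
    HumanMessage(content="Ping"),
    AIMessage(content="Pong"),
    ChatMessage(role="critic", content="Too terse."),  # arbitrary speaker role
]

for m in messages:
    print(m.type, m.content)  # system/human/ai/chat plus the text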

Tool

Tool

class Tool(BaseTool):
    """Tool that takes in function or coroutine directly."""

    description: str = ""
    func: Callable[[str], str]
    coroutine: Optional[Callable[[str], Awaitable[str]]] = None

    def _run(self, tool_input: str) -> str:
        """Use the tool."""
        return self.func(tool_input)

    async def _arun(self, tool_input: str) -> str:
        """Use the tool asynchronously."""
        if self.coroutine:
            return await self.coroutine(tool_input)
        raise NotImplementedError("Tool does not support async")

    # TODO: this is for backwards compatibility, remove in future
    def __init__(
        self, name: str, func: Callable[[str], str], description: str, **kwargs: Any
    ) -> None:
        """Initialize tool."""
        super(Tool, self).__init__(
            name=name, func=func, description=description, **kwargs
        )

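Wrapping a plain function as a tool; a minimal sketch (reverse_text is a made-up illustration):

from langchain.agents import Tool

def reverse_text(text: str) -> str:
    return text[::-1]

tool = Tool(
    name="ReverseText",
    func=reverse_text,
    description="Reverses the characters of the input string.",
)
print(tool.run("LangChain"))  # BaseTool.run dispatches to _run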

load_tools

def load_tools(
    tool_names: List[str],
    llm: Optional[BaseLLM] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    **kwargs: Any,
) -> List[BaseTool]:
    """Load tools based on their name."""


SQLDatabase

Supports connections to common databases such as SQLite, MySQL, and PostgreSQL. For example: db = SQLDatabase.from_uri("sqlite:///./notebooks/Chinook.db")

class SQLDatabase:
    """SQLAlchemy wrapper around a database."""

    def __init__(
        self,
        engine: Engine,
        schema: Optional[str] = None,
        metadata: Optional[MetaData] = None,
        ignore_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        sample_rows_in_table_info: int = 3,
        custom_table_info: Optional[dict] = None,
    ):
        """Create engine from database URI."""

    @classmethod
    def from_uri(cls, database_uri: str, **kwargs: Any) -> SQLDatabase:
        """Construct a SQLAlchemy engine from URI."""
        return cls(create_engine(database_uri), **kwargs)
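
A minimal sketch pairing it with an LLM via SQLDatabaseChain (the Chinook path reuses the example above and assumes that database file exists):

from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///./notebooks/Chinook.db")
db_chain = SQLDatabaseChain(llm=OpenAI(temperature=0), database=db, verbose=True)
db_chain.run("How many employees are there?")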