当前位置:   article > 正文

LLM大语言模型(十五):LangChain的Agent中使用自定义的ChatGLM,且底层调用的是remote的ChatGLM3-6B的HTTP服务

langchain调用本地chatglm

背景

本文搭建了一个完整的LangChain的Agent,调用本地启动的ChatGLM3-6B的HTTP server。

为后续的RAG做好了准备。

增加服务端role:observation

ChatGLM3的官方demo:openai_api_demo目录

api_server.py文件

class ChatMessage(BaseModel):
    """One chat message in the ChatGLM3 OpenAI-style API server.

    The role list was extended with "observation": LangChain's ReAct agent
    appends an observation-role message after every tool call, and without
    this value the server would reject such requests at validation time.
    """
    # role: Literal["user", "assistant", "system", "function"]
    role: Literal["user", "assistant", "system", "function","observation"]
    content: str = None
    name: Optional[str] = None
    function_call: Optional[FunctionCallResponse] = None

修改role列表,增加了“observation”。

这是因为LangChain的Agent执行过程,是ReAct模式,在执行完tool调用后,会生成一个observation角色的消息。

在将LangChain的prompt转换为ChatGLM3的prompt时,也保留了observation角色,但是在服务启动时,接口允许的role却没有observation,会导致接口调用失败。

ChatGLM3-6B 本地HTTP服务启动

参考:

LLM大语言模型(一):ChatGLM3-6B本地部署_llm3 部署-CSDN博客

自定义LLM

自定义LLM内部访问的是HTTP server。

将LangChain Agent的prompt转换为ChatGLM3能识别的prompt。

prompt转换参考:LLM大语言模型(十三):ChatGLM3-6B兼容Langchain的Function Call的一步一步的详细转换过程记录_langchain+chatglm3-CSDN博客

  1. import ast
  2. import requests
  3. import json
  4. from typing import Any, List, Optional
  5. from langchain.llms.base import LLM
  6. from langchain_core.callbacks import CallbackManagerForLLMRun
  7. from output_parse import getFirstMsg,parse_tool
  8. class MyChatGLM(LLM):
  9. max_token: int = 8192
  10. # do_sample: bool = False
  11. do_sample: bool = True
  12. temperature: float = 0.8
  13. top_p = 0.8
  14. tokenizer: object = None
  15. model: object = None
  16. history: List = []
  17. has_search: bool = False
  18. model_name: str = "chatglm3-6b"
  19. url: str = "http://localhost:8000/v1/chat/completions"
  20. tools: List = []
  21. # def __init__(self):
  22. # super().__init__()
  23. @property
  24. def _llm_type(self) -> str:
  25. return "MyChatGLM"
  26. def _tool_history(self, prompt: str):
  27. ans = []
  28. tool_prompts = prompt.split(
  29. "You have access to the following tools:\n\n")[1].split("\n\nUse a json blob")[0].split("\n")
  30. tools_json = []
  31. for tool_desc in tool_prompts:
  32. name = tool_desc.split(":")[0]
  33. description = tool_desc.split(", args:")[0].split(":")[1].strip()
  34. parameters_str = tool_desc.split("args:")[1].strip()
  35. parameters_dict = ast.literal_eval(parameters_str)
  36. params_cleaned = {}
  37. for param, details in parameters_dict.items():
  38. params_cleaned[param] = {'description': details['description'], 'type': details['type']}
  39. tools_json.append({
  40. "name": name,
  41. "description": description,
  42. "parameters": params_cleaned
  43. })
  44. ans.append({
  45. "role": "system",
  46. "content": "Answer the following questions as best as you can. You have access to the following tools:",
  47. "tools": tools_json
  48. })
  49. dialog_parts = prompt.split("Human: ")
  50. for part in dialog_parts[1:]:
  51. if "\nAI: " in part:
  52. user_input, ai_response = part.split("\nAI: ")
  53. ai_response = ai_response.split("\n")[0]
  54. else:
  55. user_input = part
  56. ai_response = None
  57. ans.append({"role": "user", "content": user_input.strip()})
  58. if ai_response:
  59. ans.append({"role": "assistant", "content": ai_response.strip()})
  60. query = dialog_parts[-1].split("\n")[0]
  61. return ans, query
  62. def _extract_observation(self, prompt: str):
  63. return_json = prompt.split("Observation: ")[-1].split("\nThought:")[0]
  64. self.history.append({
  65. "role": "observation",
  66. "content": return_json
  67. })
  68. return
  69. def _extract_tool(self):
  70. if len(self.history[-1]["metadata"]) > 0:
  71. metadata = self.history[-1]["metadata"]
  72. content = self.history[-1]["content"]
  73. lines = content.split('\n')
  74. for line in lines:
  75. if 'tool_call(' in line and ')' in line and self.has_search is False:
  76. # 获取括号内的字符串
  77. params_str = line.split('tool_call(')[-1].split(')')[0]
  78. # 解析参数对
  79. params_pairs = [param.split("=") for param in params_str.split(",") if "=" in param]
  80. params = {pair[0].strip(): pair[1].strip().strip("'\"") for pair in params_pairs}
  81. action_json = {
  82. "action": metadata,
  83. "action_input": params
  84. }
  85. self.has_search = True
  86. print("*****Action*****")
  87. print(action_json)
  88. print("*****Answer*****")
  89. return f"""
  90. Action:
  91. ```
  92. {json.dumps(action_json, ensure_ascii=False)}
  93. ```"""
  94. final_answer_json = {
  95. "action": "Final Answer",
  96. "action_input": self.history[-1]["content"]
  97. }
  98. self.has_search = False
  99. return f"""
  100. Action:
  101. ```
  102. {json.dumps(final_answer_json, ensure_ascii=False)}
  103. ```"""
  104. def _call(self, prompt: str, history: List = [], stop: Optional[List[str]] = ["<|user|>"]):
  105. if not self.has_search:
  106. self.history, query = self._tool_history(prompt)
  107. if self.history[0]:
  108. self.tools = self.history[0]["tools"]
  109. else:
  110. self._extract_observation(prompt)
  111. query = ""
  112. print(self.history)
  113. data = {}
  114. data["model"] = self.model_name
  115. data["messages"] = self.history
  116. data["temperature"] = self.temperature
  117. data["max_tokens"] = self.max_token
  118. data["tools"] = self.tools
  119. resp = self.doRequest(data)
  120. msg = {}
  121. respjson = json.loads(resp)
  122. if respjson["choices"]:
  123. if respjson["choices"][0]["finish_reason"] == 'function_call':
  124. msg["metadata"] = respjson["choices"][0]["message"]["function_call"]["name"]
  125. else:
  126. msg["metadata"] = ''
  127. msg["role"] = "assistant"
  128. msg["content"] = respjson["choices"][0]["message"]["content"]
  129. self.history.append(msg)
  130. print(self.history)
  131. response = self._extract_tool()
  132. history.append((prompt, response))
  133. return response
  134. def doRequest(self,payload:dict) -> str:
  135. # 请求头
  136. headers = {"content-type":"application/json"}
  137. # json形式,参数用json
  138. res = requests.post(self.url,json=payload,headers=headers)
  139. return res.text

定义tool

使用LangChain中Tool的方式:继承BaseTool

Tool实现方式对prompt的影响,参考:LLM大语言模型(十四):LangChain中Tool的不同定义方式,对prompt的影响-CSDN博客

class WeatherInput(BaseModel):
    # Argument schema for the weather tool; the description feeds the prompt.
    location: str = Field(description="the location need to check the weather")


class Weather(BaseTool):
    """Demo LangChain tool: returns a canned weather report for any location."""
    name = "weather"
    description = "Use for searching weather at a specific location"
    args_schema: Type[BaseModel] = WeatherInput

    def __init__(self):
        super().__init__()

    def _run(self, location: str) -> dict[str, Any]:
        # Hard-coded result for the demo; a real tool would query a weather API.
        weather = {
            "temperature": "20度",
            "description": "温度适中",
        }
        return weather

LangChain Agent调用

设置Agent使用了2个tool:Calculator() Weather(),看是否能正确调用。

# Get the prompt to use - you can modify this!
prompt = hub.pull("hwchase17/structured-chat-agent")
prompt.pretty_print()
# Two tools are registered; the run below checks the agent picks the right one.
tools = [Calculator(),Weather()]
# Choose the LLM that will drive the agent
# Only certain models support this
# Choose the LLM to use
llm = MyChatGLM()
# Construct the agent
agent = create_structured_chat_agent(llm, tools, prompt)
# Create an agent executor by passing in the agent and tools.
# handle_parsing_errors lets the executor retry when the LLM emits a malformed blob.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
ans = agent_executor.invoke({"input": "北京天气怎么样?"})
print(ans)

调用结果:

> Entering new AgentExecutor chain...
[{'role': 'system', 'content': 'Answer the following questions as best as you can. You have access to the following tools:', 'tools': [{'name': 'Calculator', 'description': 'Useful for when you need to calculate math problems', 'parameters': {'calculation': {'description': 'calculation to perform', 'type': 'string'}}}, {'name': 'weather', 'description': 'Use for searching weather at a specific location', 'parameters': {'location': {'description': 'the location need to check the weather', 'type': 'string'}}}]}, {'role': 'user', 'content': '北京天气怎么样?\n\n\n (reminder to respond in a JSON blob no matter what)'}]
[{'role': 'system', 'content': 'Answer the following questions as best as you can. You have access to the following tools:', 'tools': [{'name': 'Calculator', 'description': 'Useful for when you need to calculate math problems', 'parameters': {'calculation': {'description': 'calculation to perform', 'type': 'string'}}}, {'name': 'weather', 'description': 'Use for searching weather at a specific location', 'parameters': {'location': {'description': 'the location need to check the weather', 'type': 'string'}}}]}, {'role': 'user', 'content': '北京天气怎么样?\n\n\n (reminder to respond in a JSON blob no matter what)'}, {'metadata': 'weather', 'role': 'assistant', 'content': "weather\n ```python\ntool_call(location='北京')\n```"}]
*****Action*****
{'action': 'weather', 'action_input': {'location': '北京'}}
*****Answer*****

Action:
```
{"action": "weather", "action_input": {"location": "北京"}}
```{'temperature': '20度', 'description': '温度适中'}

[{'role': 'system', 'content': 'Answer the following questions as best as you can. You have access to the following tools:', 'tools': [{'name': 'Calculator', 'description': 'Useful for when you need to calculate math problems', 'parameters': {'calculation': {'description': 'calculation to perform', 'type': 'string'}}}, {'name': 'weather', 'description': 'Use for searching weather at a specific location', 'parameters': {'location': {'description': 'the location need to check the weather', 'type': 'string'}}}]}, {'role': 'user', 'content': '北京天气怎么样?\n\n\n (reminder to respond in a JSON blob no matter what)'}, {'metadata': 'weather', 'role': 'assistant', 'content': "weather\n ```python\ntool_call(location='北京')\n```"}, {'role': 'observation', 'content': "{'temperature': '20度', 'description': '温度适中'}"}]
[{'role': 'system', 'content': 'Answer the following questions as best as you can. You have access to the following tools:', 'tools': [{'name': 'Calculator', 'description': 'Useful for when you need to calculate math problems', 'parameters': {'calculation': {'description': 'calculation to perform', 'type': 'string'}}}, {'name': 'weather', 'description': 'Use for searching weather at a specific location', 'parameters': {'location': {'description': 'the location need to check the weather', 'type': 'string'}}}]}, {'role': 'user', 'content': '北京天气怎么样?\n\n\n (reminder to respond in a JSON blob no matter what)'}, {'metadata': 'weather', 'role': 'assistant', 'content': "weather\n ```python\ntool_call(location='北京')\n```"}, {'role': 'observation', 'content': "{'temperature': '20度', 'description': '温度适中'}"}, {'metadata': '', 'role': 'assistant', 'content': '根据最新的气象数据,北京的天气情况如下:温度为20度,天气状况适中。'}]

Action:
```
{"action": "Final Answer", "action_input": "根据最新的气象数据,北京的天气情况如下:温度为20度,天气状况适中。"}
```

> Finished chain.
{'input': '北京天气怎么样?', 'output': '根据最新的气象数据,北京的天气情况如下:温度为20度,天气状况适中。'}

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/天景科技苑/article/detail/896178
推荐阅读
相关标签
  

闽ICP备14008679号