
Using ChatGLM4

1. Token

GLM-4 is closed-source now, so I decided to try the API instead. First log in to the Zhipu open platform; signing up gets you 100,000 free tokens. Grab your key from the API Keys page, then install the SDK into your environment: pip install zhipuai.
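
Rather than hard-coding the key, you can keep it in an environment variable. A minimal sketch (the variable name ZHIPUAI_API_KEY is my own convention, not something the SDK requires; the snippets below assume zhipuai_api_key holds your key):

import os
from zhipuai import ZhipuAI

# run e.g. `export ZHIPUAI_API_KEY=...` (or `set` on Windows) beforehand
zhipuai_api_key = os.environ["ZHIPUAI_API_KEY"]
client = ZhipuAI(api_key=zhipuai_api_key)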

2. Usage

from zhipuai import ZhipuAI
client = ZhipuAI(api_key=zhipuai_api_key)  # your API key
response = client.chat.completions.create(
    model="glm-4",  # name of the model to call
    messages=[
        {"role": "assistant", "content": "我是人工智能助手GLM4"},
        {"role": "user", "content": "给我写一首优美的诗歌"},
    ],
)
print(response.choices[0].message.content)
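
Besides the text, the response also reports token usage. As far as I can tell the zhipuai SDK mirrors the OpenAI client here (treat the exact attribute as an assumption if your version differs), so you can keep an eye on your free quota with:

print(response.usage)  # prompt_tokens / completion_tokens / total_tokens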

To stream the output, pass stream=True to the create() call (alongside messages, not inside it) and print the chunks in a loop.

response = client.chat.completions.create(
    model="glm-4",  # name of the model to call
    messages=[
        {"role": "assistant", "content": "我是人工智能助手GLM4"},
        {"role": "user", "content": "给我写一首优美的诗歌"},
    ],
    stream=True,
)
for chunk in response:
    print(chunk.choices[0].delta.content, end="")
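
If you also want the complete reply once streaming finishes, accumulate the deltas as they arrive (this replaces the loop above, since the stream can only be consumed once). The None check is defensive: some SDKs send an empty final delta, and I have not confirmed whether zhipuai needs it:

full_reply = ""
for chunk in response:
    piece = chunk.choices[0].delta.content
    if piece:  # defensive: skip empty/None chunks
        full_reply += piece
        print(piece, end="")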

3. Using GLM-4 with the LangChain framework

1. LLM

First, wrap the model as a custom LangChain LLM:

from zhipuai import ZhipuAI
import json, os, yaml
from langchain.llms.base import LLM
from typing import List, Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

def tool_config_from_file(tool_name, directory="Tool/"):
    """Search the directory for the tool's YAML file and return its parsed content."""
    for filename in os.listdir(directory):
        if filename.endswith('.yaml') and tool_name in filename:
            file_path = os.path.join(directory, filename)
            with open(file_path, encoding='utf-8') as f:
                return yaml.safe_load(f)
    return None

class ChatGLM4(LLM):
    max_token: int = 8192
    do_sample: bool = True
    temperature: float = 0.8
    top_p: float = 0.8
    tokenizer: object = None
    model: object = None
    history: List = []
    tool_names: List = []
    has_search: bool = False
    client: object = None

    def __init__(self):
        super().__init__()
        self.client = ZhipuAI(api_key=zhipuai_api_key)  # your API key

    @property
    def _llm_type(self) -> str:
        return "ChatGLM4"

    def stream(self, prompt: str, history: Optional[List] = None):
        if history is None:
            history = []
        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",  # name of the model to call
            messages=history,
            stream=True,
        )
        for chunk in response:
            yield chunk.choices[0].delta.content

    def _tool_history(self, prompt: str):
        ans = []
        tool_prompts = prompt.split(
            "You have access to the following tools:\n\n")[1].split("\n\nUse a json blob")[0].split("\n")

        tool_names = [tool.split(":")[0] for tool in tool_prompts]
        self.tool_names = tool_names
        tools_json = []
        for i, tool in enumerate(tool_names):
            tool_config = tool_config_from_file(tool)
            if tool_config:
                tools_json.append(tool_config)
            else:
                raise ValueError(
                    f"Tool {tool} config not found! Its description is {tool_prompts[i]}"
                )

        ans.append({
            "role": "system",
            "content": "Answer the following questions as best as you can. You have access to the following tools:",
            "tools": tools_json
        })
        query = f"""{prompt.split("Human: ")[-1].strip()}"""
        return ans, query

    def _extract_observation(self, prompt: str):
        return_json = prompt.split("Observation: ")[-1].split("\nThought:")[0]
        self.history.append({
            "role": "observation",
            "content": return_json
        })
        return

    def _extract_tool(self):
        if len(self.history[-1]["metadata"]) > 0:
            metadata = self.history[-1]["metadata"]
            content = self.history[-1]["content"]
            if "tool_call" in content:
                for tool in self.tool_names:
                    if tool in metadata:
                        input_para = content.split("='")[-1].split("'")[0]
                        action_json = {
                            "action": tool,
                            "action_input": input_para
                        }
                        self.has_search = True
                        return f"""
Action: 
```
{json.dumps(action_json, ensure_ascii=False)}
```"""
        final_answer_json = {
            "action": "Final Answer",
            "action_input": self.history[-1]["content"]
        }
        self.has_search = False
        return f"""
Action: 
```
{json.dumps(final_answer_json, ensure_ascii=False)}
```"""

    def _call(self, prompt: str, history: Optional[List] = None, stop: Optional[List[str]] = ["<|user|>"]):
        if history is None:
            history = []
        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",  # name of the model to call
            messages=history,
        )
        result = response.choices[0].message.content
        print(result)
        return result
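
One thing worth pointing out: _call as written never invokes _tool_history, _extract_observation, or _extract_tool; they are presumably carried over from the older ChatGLM3 wrapper, and the agent example later on works anyway because glm-4 itself produces the ReAct-style JSON that LangChain expects. If you do wire them up, tool_config_from_file expects one YAML file per tool under Tool/. A hypothetical sketch of such a file, modeled on the ChatGLM3 demo's function schema (the exact fields are an assumption; check the official docs):

# Tool/相机.yaml -- hypothetical example, adjust to the real schema
name: 相机
description: 使用此工具打开我的相机。
parameters:
  type: object
  properties: {}
  required: []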

Then instantiate it and run a simple chain:

llm = ChatGLM4()
template = """{question}"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "介绍一下北京?"
llm_chain.invoke(question)
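
Note that llm_chain.invoke returns a dict rather than a bare string; with LLMChain the generated text sits under the "text" key by default:

print(llm_chain.invoke(question)["text"])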

2. Knowledge bases

1. Knowledge base from a text file

This follows the FAISS example in the official LangChain docs.

from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

query = "小明现在住在那里呢?"
llm = ChatGLM4()
embedding_path = "C:/Users/dz/Desktop/bge-large-zh-v1.5"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)
loader = TextLoader("C:/Users/dz/Desktop/qu.txt", encoding="UTF-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vectorstore = FAISS.from_documents(docs, embeddings)
docs = vectorstore.similarity_search(query)  # find the best-matching chunks
context = docs[0].page_content  # take the top hit's text as the context (a plain string)
print(context)
template = """只根据以下情境回答问题:
{context}
问题: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()
print(vectorstore.similarity_search_with_score(query))
chain = prompt | llm | output_parser
chain.invoke({"context": context, "question": query})
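
By the way, chunk_size=10 (characters) only makes sense for this toy file; for real documents a few hundred characters with some overlap is a more typical starting point, e.g.:

text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=30)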

2. Knowledge base from raw strings

vectorstore = FAISS.from_texts(["小明住在洛杉矶", "小黄住在伦敦", "小兰住在纽约"], embedding=embeddings)
retriever = vectorstore.as_retriever()
print(retriever)
template = """只根据以下情境回答问题:
{context}
问题: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
print(vectorstore.similarity_search_with_score(query))
chain = setup_and_retrieval | prompt | llm | output_parser
chain.invoke(query)
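
As written, the retriever hands {context} a list of Document objects, which gets stringified wholesale into the prompt. It works, but joining just the page text keeps the prompt cleaner; this is an optional tweak of my own, not part of the original example:

def format_docs(docs):
    # join the retrieved Documents' text into one plain string
    return "\n\n".join(d.page_content for d in docs)

setup_and_retrieval = RunnableParallel(
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
)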

3. Defining and using a custom tool

1. Running a local Python script through the shell

This script worked with GPT last year, but I could never get it working with ChatGLM, so I ended up rewriting LangChain's agent myself (see my earlier post). Now that GLM-4 is compatible with LangChain out of the box, it is painless. I wrote a small local script that opens the camera:

# coding=utf-8
import cv2

cap = cv2.VideoCapture(0)  # default camera
success = cap.isOpened()
while success:
    ret, frame = cap.read()
    if not ret:
        break
    # cv2.imshow expects BGR, which is what cap.read() already returns
    cv2.imshow("window", frame)
    if cv2.waitKey(10) & 0xFF == ord("q"):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()

Next, define a custom tool that shells out to run the script, and hand that tool to the agent.

from langchain.tools import BaseTool
from langchain.agents import initialize_agent
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.tools import ShellTool

shell_tool = ShellTool()

class Cam(BaseTool):
    name = "相机"
    description = '使用此工具打开我的相机。'

    def _run(self, expr: str):
        # activate the env and launch the camera script (Windows-style `&` chaining)
        return shell_tool.run({"commands": "conda activate langchain & python C:/Users/dz/Desktop/camera.py"})

    def _arun(self, query: str):
        raise NotImplementedError("Async operation not supported yet")

tools = [Cam()]
agent = initialize_agent(
    agent='chat-conversational-react-description',
    tools=tools,
    llm=llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method='generate',
    memory=ConversationBufferWindowMemory(
        memory_key='chat_history',
        k=5,
        return_messages=True
    )
)
agent("打开我的相机?")
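
Calling agent(...) like a function is the legacy Chain style; on newer LangChain versions agent.invoke("打开我的相机") is the equivalent. With verbose=True you can watch the model emit the JSON action blob that selects the 相机 tool, and ConversationBufferWindowMemory(k=5) keeps the last five exchanges in context for follow-up requests.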

Tested successfully.
