```python
import gradio

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import HuggingFacePipeline

# Load ChatGLM3-6B as a local Hugging Face text-generation pipeline wrapped for LangChain.
model = HuggingFacePipeline.from_model_id(
    model_id="THUDM/chatglm3-6b",
    task="text-generation",
    device=0,
    model_kwargs={"trust_remote_code": True},
    pipeline_kwargs={"max_new_tokens": 5000},
)

# Prompt: "Tell me about the economic development of {user_input}, in no more than 200 characters."
prompt = ChatPromptTemplate.from_template("告诉我关于{user_input}的经济发展情况,不多于200个字")
output_parser = StrOutputParser()

# LCEL chain: prompt -> model -> string output parser.
chain = prompt | model | output_parser

def greet(name):
    response = chain.invoke({"user_input": name})
    return response

# Expose the chain through a simple text-in / text-out Gradio interface.
demo = gradio.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
```
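Not from the original code: a minimal sketch of the same LCEL `|` composition with a stand-in model (a `RunnableLambda` that returns a fixed string), so the prompt-to-parser wiring can be checked without downloading ChatGLM3-6B. The placeholder reply and the example input "上海" are illustrative only.

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda

# Stand-in "model": ignores the rendered prompt and returns a fixed string.
fake_model = RunnableLambda(lambda prompt_value: "placeholder answer (wiring test only)")

demo_chain = (
    ChatPromptTemplate.from_template("告诉我关于{user_input}的经济发展情况,不多于200个字")
    | fake_model
    | StrOutputParser()
)

print(demo_chain.invoke({"user_input": "上海"}))  # -> placeholder answer (wiring test only)
```

The second version below builds the same prompt with `ChatPromptTemplate.from_messages` / `HumanMessagePromptTemplate` instead of `from_template`.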
```python
import gradio

from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import HuggingFacePipeline

# Alternative kept from the original post (commented out): swap in a custom zhipu_llm wrapper
# instead of the local pipeline.
# from ChatGLM_new import zhipu_llm
# model = zhipu_llm

model = HuggingFacePipeline.from_model_id(
    model_id="THUDM/chatglm3-6b",
    task="text-generation",
    verbose=True,
    device=0,
    model_kwargs={"trust_remote_code": True},
    pipeline_kwargs={"max_new_tokens": 5000},
)

# Prompt built from role/content tuples; the extra roles are left commented out as examples.
prompt = ChatPromptTemplate.from_messages([
    # ("system", "记住:对所有问题你只回答下面的4个字:我不知道,"),
    # ("human", "Hello, how are you doing?"),
    # ("ai", "I'm doing well, thanks!"),
    ("human", "告诉我关于{user_input}的经济发展情况,不多于200个字"),
])

# Equivalent prompt built with HumanMessagePromptTemplate; this definition replaces the one above.
prompt = ChatPromptTemplate.from_messages([
    HumanMessagePromptTemplate.from_template("告诉我关于{user_input}的经济发展情况,不多于200个字"),
])

output_parser = StrOutputParser()
chain = prompt | model | output_parser

def greet(name):
    response = chain.invoke({"user_input": name})
    return response

demo = gradio.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
```
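Not part of the original post: a quick way to exercise the chain itself without launching the Gradio UI, assuming the `chain` object defined in the script above. The city names are arbitrary example inputs.

```python
# Quick checks of the chain without the Gradio interface.
# Assumes `chain` from the script above; the inputs are arbitrary examples.
print(chain.invoke({"user_input": "上海"}))

# `batch` pushes several inputs through the same prompt -> model -> parser sequence.
print(chain.batch([{"user_input": "北京"}, {"user_input": "深圳"}]))
```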