当前位置:   article > 正文

LLMChain 的使用（invoke 调用示例）
import ChatGLM
from langchain.chains import LLMChain

from langchain_core.prompts import ChatPromptTemplate

from langchain.chains import SimpleSequentialChain

# Prompt with two input variables: {product} (what the company makes) and
# {a2} (a word that must appear in the generated name).
# NOTE: fixed typo "contian" -> "contain" so the instruction is readable
# by the model.
prompt = ChatPromptTemplate.from_template(
    "What is the best name to describe a company that makes {product},"
    "please respond in chinese, must contain {a2}"
)

# Local ChatGLM model wrapped as a LangChain-compatible LLM.
llm = ChatGLM.ChatGLM_LLM()

# Single-step chain; verbose=True prints the formatted prompt on invoke.
chain_one = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
)

# invoke() takes a dict supplying every prompt variable and returns a dict
# echoing the inputs plus the model output under "text".
print(chain_one.invoke({"product": "shoes", "a2": "深圳"}))
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
from langchain.prompts import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
import gradio as gr
from langchain.chains import LLMChain
from LLMs import myllm

# Local LLM wrapper instance shared by every chain below.
hf = myllm()

# First chain: generate a company name for {product} that must contain
# {words}. (Fixed typo "contian" -> "contain".)
prompt = ChatPromptTemplate.from_template(
    "What is the best name to describe a company that makes {product},"
    "please respond in chinese, must contain {words}"
)
# Second chain: forwards its {input} straight to the LLM.
prompt2 = ChatPromptTemplate.from_template("{input}")

llm = hf

chain_one = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=False,
)

chain_one2 = LLMChain(
    llm=llm,
    prompt=prompt2,
    verbose=False,
)

# LCEL composition: the dict is coerced to a RunnableParallel, so the
# output of chain_one becomes the "input" variable fed into chain_one2.
chains = {"input": chain_one} | chain_one2

print(chains.invoke({"product": "shoes", "words": "深圳"}))


  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32

@chain的使用

from langchain.prompts import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
import gradio as gr
from langchain.chains import LLMChain
from langchain_core.runnables import chain
from langchain_core.output_parsers import StrOutputParser
from LLMs import myllm

# Local LLM wrapper instance shared by every chain below.
hf = myllm()

# First chain: generate a company name for {product} that must contain
# {words}. (Fixed typo "contian" -> "contain".)
prompt = ChatPromptTemplate.from_template(
    "What is the best name to describe a company that makes {product},"
    "please respond in chinese, must contain {words}"
)
# Second chain: forwards its {input} straight to the LLM.
prompt2 = ChatPromptTemplate.from_template("{input}")

llm = hf

chain_one = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=False,
)

chain_one2 = LLMChain(
    llm=llm,
    prompt=prompt2,
    verbose=False,
)


@chain
def myadd(name: str):
    """Pass-through Runnable built with the @chain decorator.

    Prints the value it receives (so the intermediate pipeline result is
    visible) and returns it unchanged.
    """
    print(name)
    return name


# Equivalent pure-LCEL pipeline without the second LLMChain:
# chains = {"input": chain_one} | prompt2 | llm | StrOutputParser()

# The dict is coerced to a RunnableParallel; chain_one's output is piped
# through chain_one2 and finally through the custom myadd runnable.
chains2 = {"input": chain_one} | chain_one2 | myadd
print(chains2.invoke({"product": "shoes", "words": "北京"}))

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小蓝xlanll/article/detail/688489
推荐阅读
相关标签
  

闽ICP备14008679号