Installation: pull and run the Qwen models with Ollama (the first run downloads the model):
ollama run qwen:0.5b
ollama run qwen:1.8b
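Before wiring Ollama into LangChain, it is worth confirming that the local server actually answers. A minimal sketch, assuming the default Ollama REST endpoint on port 11434 (the same address the script below uses) and the requests library:

import requests

# One-shot (non-streaming) generation against the local Ollama server.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "qwen:1.8b", "prompt": "hello", "stream": False},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # plain-text reply from the model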
Run D:\Research\flower_exif\gemma_local.py from VS Code rather than cmd:
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms.ollama import Ollama

# Stream tokens to stdout as they are generated, using the handlers imported above.
llm = Ollama(
    base_url="http://localhost:11434",
    model="qwen:1.8b",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

def get_completion_ollama(prompt):
    return llm.invoke(prompt)

prompt = 'could you describe a storm weather'
print(get_completion_ollama(prompt=prompt))
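With the streaming handler attached, tokens appear on stdout as they are generated rather than after the whole completion finishes, which makes a small local model feel noticeably more responsive. Swapping in the lighter qwen:0.5b pulled above only requires changing the model parameter.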