Server code
import uvicorn
import torch
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, LlamaForCausalLM

app = FastAPI()

class Query(BaseModel):
    text: str

device = torch.device("cuda:0")
model_path = 'llama-2-7b-chat-hf'
model = LlamaForCausalLM.from_pretrained(model_path, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_path)

@app.post("/chat/")
async def generate_response(query: Query):
    # Wrap the raw user text in Llama-2-chat's [INST] ... [/INST] prompt format
    inputs = f"[INST] {query.text.strip()} [/INST]"
    input_ids = tokenizer(inputs, return_tensors="pt").input_ids.to(device)
    generate_ids = model.generate(
        input_ids,
        max_new_tokens=500,
        do_sample=True,
        top_p=0.85,
        temperature=1.0,
        repetition_penalty=1.0,
        eos_token_id=2,   # </s>
        bos_token_id=1,   # <s>
        pad_token_id=0)
    # Note: the decoded text still includes the echoed prompt and special tokens;
    # pass skip_special_tokens=True to batch_decode if you want them stripped.
    output = tokenizer.batch_decode(generate_ids)[0]
    return {"result": output}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=6006)
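The endpoint above only responds once the full completion has been generated. For long outputs it is often nicer to stream tokens as they are produced. Below is a minimal sketch of a streaming variant using transformers' TextIteratorStreamer together with FastAPI's StreamingResponse; the /chat/stream/ route name is an assumption, and the sketch reuses the app, model, tokenizer, device, and Query defined above.

import threading
from fastapi.responses import StreamingResponse
from transformers import TextIteratorStreamer

@app.post("/chat/stream/")  # hypothetical route name, not from the original post
def generate_stream(query: Query):
    prompt = f"[INST] {query.text.strip()} [/INST]"
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    # skip_prompt drops the echoed prompt; skip_special_tokens strips <s>/</s>
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True,
                                    skip_special_tokens=True)
    # model.generate blocks until done, so run it in a background thread
    # while the response body yields text chunks from the streamer
    thread = threading.Thread(
        target=model.generate,
        kwargs=dict(input_ids=input_ids, streamer=streamer,
                    max_new_tokens=500, do_sample=True,
                    top_p=0.85, temperature=1.0))
    thread.start()
    return StreamingResponse((chunk for chunk in streamer),
                             media_type="text/plain")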
Client code
import requests
url = "https://xxxxxxxxxxxx/chat/"
# Send the raw user text; the server itself wraps it in [INST] ... [/INST],
# so adding the tags here as well would double-wrap the prompt.
query = {"text": "introduce china"}
response = requests.post(url, json=query)
if response.status_code == 200:
    result = response.json()
    print("chat:", result["result"])
else:
    print("Error:", response.status_code, response.text)