# Inspect which tasks pipeline() supports
from transformers.pipelines import SUPPORTED_TASKS, get_supported_tasks
print(SUPPORTED_TASKS.items(), get_supported_tasks())
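The raw dict is hard to read; a minimal sketch that just lists the task names returned by get_supported_tasks():

# Print one supported task name per line
for task_name in get_supported_tasks():
    print(task_name)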
from transformers import pipeline
# With no model given, pipeline() downloads a default English sentiment model
pipe = pipeline("text-classification")
pipe("very good!")
# [{'label': 'POSITIVE', 'score': 0.9998525381088257}]
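The pipeline also accepts a list of inputs and returns one result per item; a quick sketch:

pipe(["very good!", "terrible..."])
# e.g. [{'label': 'POSITIVE', 'score': ...}, {'label': 'NEGATIVE', 'score': ...}]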
from transformers import pipeline
# https://huggingface.co/models
pipe = pipeline("text-classification",
                model="uer/roberta-base-finetuned-dianping-chinese")
pipe("我觉得不太行!")
# [{'label': 'negative (stars 1, 2 and 3)', 'score': 0.9735506772994995}]
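Depending on your transformers version, you can ask the text-classification pipeline for every label's score instead of only the top one; a sketch using the top_k argument (older versions use return_all_scores=True instead):

pipe("我觉得不太行!", top_k=None)  # top_k=None returns scores for all labels
# e.g. [{'label': 'negative (...)', 'score': 0.97...}, {'label': 'positive (...)', 'score': 0.02...}]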
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# Load the model and tokenizer explicitly, then hand them to the pipeline
model = AutoModelForSequenceClassification.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")
tokenizer = AutoTokenizer.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
pipe("你真是个人才!")
# [{'label': 'positive (stars 4 and 5)', 'score': 0.8717765808105469}]
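To avoid re-downloading, you can save the model and tokenizer to a local directory and point the pipeline at it; a sketch (the "./dianping-sentiment" path is just an example):

model.save_pretrained("./dianping-sentiment")      # example local path
tokenizer.save_pretrained("./dianping-sentiment")
pipe = pipeline("text-classification", model="./dianping-sentiment", tokenizer="./dianping-sentiment")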
By default the pipeline runs on the CPU:
pipe.model.device
# device(type='cpu')
%%time  # Jupyter magic command: time the whole cell
for i in range(100):
    pipe("你真是个人才!")
'''
CPU times: total: 19.4 s
Wall time: 4.94 s
'''
import torch
import time

times = []
for i in range(100):
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for pending GPU work before starting the clock
    start = time.time()
    pipe("我觉得不太行!")
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # make sure the GPU has finished before stopping the clock
    end = time.time()
    times.append(end - start)
print(sum(times) / 100)
# 0.05427998542785645  (CPU)
# 0.012370436191558839 (GPU)
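If you re-run the benchmark, a short warm-up before the timed loop keeps the first (slower) call from skewing the average; a sketch:

# Warm up once so one-time setup cost is not included in the measurement
for _ in range(10):
    pipe("我觉得不太行!")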
# device=0 places the whole pipeline on the first GPU
pipe = pipeline("text-classification", model="uer/roberta-base-finetuned-dianping-chinese", device=0)
pipe.model.device
# device(type='cuda', index=0)
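If you are not sure whether a GPU is available, a small sketch that picks the device at runtime:

import torch

# device=0 -> first GPU, device=-1 -> CPU
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-classification",
                model="uer/roberta-base-finetuned-dianping-chinese",
                device=device)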
qa_pipeline = pipeline("question-answering", model="uer/roberta-base-chinese-extractive-qa")
qa_pipeline(question="是谁?", context="是帅哥!")
# {'score': 0.004711466375738382, 'start': 4, 'end': 6, 'answer': '帅哥'}
qa_pipeline is an instance of QuestionAnsweringPipeline; Ctrl + left-click the class in your IDE to view the source of its __call__ method.
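You can also inspect the source without an IDE; a minimal sketch using Python's standard inspect module:

import inspect

print(type(qa_pipeline))                              # the concrete pipeline class
print(inspect.getsource(type(qa_pipeline).__call__))  # source of its __call__ method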
checkpoint = "google/owlvit-base-patch32"
detector = pipeline(model=checkpoint, task="zero-shot-object-detection")
import requests
from PIL import Image
url = "https://unsplash.com/photos/oj0zeY2Ltk4/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTR8fHBpY25pY3xlbnwwfHx8fDE2Nzc0OTE1NDk&force=true&w=640"
im = Image.open(requests.get(url, stream=True).raw)
im
predictions = detector(im, candidate_labels=["hat", "book"])
from PIL import ImageDraw

draw = ImageDraw.Draw(im)
for prediction in predictions:
    box = prediction["box"]
    label = prediction["label"]
    score = prediction["score"]
    xmin, ymin, xmax, ymax = box.values()
    draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1)
    draw.text((xmin, ymin), f"{label}: {round(score, 2)}", fill="red")
im
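The detector can return low-confidence boxes; a sketch that keeps only predictions above a score threshold before drawing (the 0.1 cut-off is just an example):

# Drop low-confidence detections before drawing
confident = [p for p in predictions if p["score"] >= 0.1]
for p in confident:
    print(p["label"], round(p["score"], 2), p["box"])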
'''
Behind the scenes, the pipeline does three things:
1. Preprocess the input with the tokenizer
2. Run the model to get the raw output (logits)
3. Map the predicted id to a label via model.config.id2label
'''
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")
tokenizer = AutoTokenizer.from_pretrained("uer/roberta-base-finetuned-dianping-chinese")

text = "我觉得不行!"
inputs = tokenizer(text, return_tensors="pt")  # note: return_tensors="pt" returns PyTorch tensors
output = model(**inputs)
logits = torch.softmax(output.logits, dim=-1)
pred_id = torch.argmax(logits).item()
# Optionally replace the model's own label names with shorter ones
id2label = {
    1: "Positive",
    0: "Negative",
}
model.config.id2label = id2label
print(text, "\n", model.config.id2label.get(pred_id))
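Putting the three steps together, a minimal sketch of a classify helper (a hypothetical function, not part of transformers) that mimics the pipeline's output format:

def classify(text, model, tokenizer):
    # 1. Preprocess: tokenize the raw text into PyTorch tensors
    inputs = tokenizer(text, return_tensors="pt")
    # 2. Forward pass: run the model and turn logits into probabilities
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)
    pred_id = torch.argmax(probs, dim=-1).item()
    # 3. Postprocess: map the id to a label, same shape as the pipeline output
    return [{"label": model.config.id2label[pred_id], "score": probs[0, pred_id].item()}]

classify("我觉得不行!", model, tokenizer)
# e.g. [{'label': 'Negative', 'score': ...}]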