# OpenPrompt tutorial: zero-shot sentiment classification with a prompted MLM
"""Zero-shot sentiment classification with OpenPrompt.

Wraps a pretrained masked language model (BERT) with a manual prompt
template and verbalizer, then classifies two example sentences without
any fine-tuning.
"""
import torch  # was missing in the original — torch.no_grad/argmax are used below

from openprompt import PromptDataLoader, PromptForClassification
from openprompt.data_utils import InputExample
from openprompt.plms import load_plm
from openprompt.prompts import ManualTemplate, ManualVerbalizer

# Step 1: define the task — two sentiment classes and unlabeled examples.
classes = [
    'negative',
    'positive',
]
dataset = [
    InputExample(
        guid=0,
        text_a="Albert Einstein was one of the greatest intellects of his time.",
    ),
    InputExample(
        guid=1,
        text_a="The film was badly made.",
    ),
]

# Step 2: load a pretrained masked language model, its tokenizer, config,
# and the tokenizer-wrapper class OpenPrompt needs for this model family.
plm, tokenizer, model_config, WrapperClass = load_plm('bert', "bert-base-cased")

# Step 3: template — {"placeholder":"text_a"} is filled with the example
# text; {"mask"} is the position where the PLM predicts a word.
promptTemplate = ManualTemplate(
    text='{"placeholder":"text_a"} It was {"mask"}',
    tokenizer=tokenizer,
)

# Step 4: verbalizer — maps words predicted at the mask to class labels.
promptVerbalizer = ManualVerbalizer(
    classes=classes,
    label_words={
        'negative': ['bad'],
        'positive': ['good', 'wonderful', 'great'],
    },
    tokenizer=tokenizer,
)

# Step 5: assemble the prompt-based classification pipeline.
promptModel = PromptForClassification(
    template=promptTemplate,
    plm=plm,
    verbalizer=promptVerbalizer,
)

# Step 6: dataloader that tokenizes each example through the template.
data_loader = PromptDataLoader(
    dataset=dataset,
    tokenizer=tokenizer,
    template=promptTemplate,
    tokenizer_wrapper_class=WrapperClass,
)

# Step 7: zero-shot inference using the pretrained MLM with the prompt.
promptModel.eval()
with torch.no_grad():
    for batch in data_loader:
        logits = promptModel(batch)
        preds = torch.argmax(logits, dim=-1)
        # `preds` is a 1-D tensor (one prediction per example in the batch);
        # indexing a Python list with the tensor itself only works when the
        # batch has exactly one element, so iterate explicitly instead.
        for pred in preds.tolist():
            print(classes[pred])
# Expected output: 'positive' for example 0, 'negative' for example 1.
# Reference: adapted from a Zhihu article on OpenPrompt.
# Scraped-page footer: Copyright © 2003-2013 www.wpsshop.cn. All rights reserved.