nlp-notebooks/Text classification with BERT in PyTorch.ipynb
Referring to this solution, the approach I chose was to subclass BertForSequenceClassification and override its forward method, i.e. replace cell In [9] of the code above with the following:
import torch
from transformers import BertForSequenceClassification
from transformers.modeling_outputs import SequenceClassifierOutput

class BertForMultilabelSequenceClassification(BertForSequenceClassification):
    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
                position_ids=None, head_mask=None, inputs_embeds=None, labels=None,
                output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask,
                            inputs_embeds=inputs_embeds,
                            output_attentions=output_attentions,
                            output_hidden_states=output_hidden_states,
                            return_dict=return_dict)

        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # BCEWithLogitsLoss applies a sigmoid per label, so each label is
            # scored independently -- the key change for multi-label tasks,
            # replacing the single-label CrossEntropyLoss of the parent class.
            loss_fct = torch.nn.BCEWithLogitsLoss()
            loss = loss_fct(logits.view(-1, self.num_labels),
                            labels.float().view(-1, self.num_labels))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(loss=loss,
                                        logits=logits,
                                        hidden_states=outputs.hidden_states,
                                        attentions=outputs.attentions)

model = BertForMultilabelSequenceClassification.from_pretrained(BERT_MODEL,
                                                                num_labels=len(label2idx))
model.to(device)
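At inference time the logits are no longer mutually exclusive class scores, so argmax does not apply; instead each label is thresholded independently after a sigmoid. Below is a minimal sketch of this step. BERT_MODEL, label2idx and device come from the notebook above; the tokenizer instantiation, the example text, and the 0.5 threshold are my own assumptions, not part of the original notebook.

import torch
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(BERT_MODEL)

text = "example document to classify"  # hypothetical input
inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)

model.eval()
with torch.no_grad():
    logits = model(**inputs).logits    # shape: (1, num_labels)

# Sigmoid turns each logit into an independent probability; every label
# whose probability exceeds the (assumed) 0.5 threshold is predicted.
probs = torch.sigmoid(logits)[0]
idx2label = {i: label for label, i in label2idx.items()}
predicted = [idx2label[i] for i, p in enumerate(probs) if p > 0.5]
print(predicted)

Unlike the single-label case, this can return zero, one, or several labels for the same document, which is exactly the behavior the BCEWithLogitsLoss training objective optimizes for.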