Text classification is a fundamental NLP task and one of the most commonly deployed services. This article provides an out-of-the-box text classifier.
The model file model.py is as follows:
# File model.py
# -*- coding: utf-8 -*-
import torch
from torch import nn
from transformers import BertForSequenceClassification


class BertModel(nn.Module):
    def __init__(self, num_labels):
        super(BertModel, self).__init__()
        self.bert = BertForSequenceClassification.from_pretrained(
            "hfl/chinese-roberta-wwm-ext", num_labels=num_labels)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        for param in self.bert.parameters():
            param.requires_grad = True  # fine-tune every parameter; some layers could be frozen instead

    def forward(self, batch_seqs, batch_seq_masks, batch_seq_segments, labels):
        loss, logits = self.bert(input_ids=batch_seqs,
                                 attention_mask=batch_seq_masks,
                                 token_type_ids=batch_seq_segments,
                                 labels=labels)[:2]
        probabilities = nn.functional.softmax(logits, dim=-1)
        prob, pre_label = torch.max(probabilities, 1)
        return loss, pre_label, prob
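The comment in __init__ notes that some layers can be frozen instead of fine-tuning the whole network. A minimal sketch of what that could look like; freezing the embeddings and the first six encoder layers is only an illustrative choice:

# Hypothetical sketch: freeze the embedding layer and the first 6 encoder layers,
# leaving the upper layers and the classification head trainable.
from model import BertModel

model = BertModel(num_labels=10)
for param in model.bert.bert.embeddings.parameters():
    param.requires_grad = False
for layer in model.bert.bert.encoder.layer[:6]:
    for param in layer.parameters():
        param.requires_grad = False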
data.py is as follows:
# -*- coding: utf-8 -*-
import torch
from torch.utils.data import Dataset
from hanziconv import HanziConv


class DataPrecessForCLF(Dataset):
    def __init__(self, bert_tokenizer, df, max_char_len):
        # The "id" column holds the integer-encoded label (a dummy value at inference time).
        self.y = torch.LongTensor(df["id"])
        self.max_seq_len = max_char_len
        # Normalize traditional Chinese characters to simplified before tokenizing.
        df["sentence"] = df["sentence"].apply(lambda i: HanziConv.toSimplified(i))
        self.encoded_inputs = bert_tokenizer(df["sentence"].tolist(),
                                             padding="max_length",
                                             truncation=True,
                                             max_length=max_char_len,
                                             return_tensors="pt")

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        assert (len(self.encoded_inputs["input_ids"][idx])
                == len(self.encoded_inputs["attention_mask"][idx])
                == len(self.encoded_inputs["token_type_ids"][idx]))
        return (self.encoded_inputs["input_ids"][idx],
                self.encoded_inputs["attention_mask"][idx],
                self.encoded_inputs["token_type_ids"][idx],
                self.y[idx])
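A quick sketch of how the dataset class plugs into a DataLoader; the two-row DataFrame below is only illustrative, and train.py builds the "id" column with a LabelEncoder:

import pandas as pd
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from data import DataPrecessForCLF

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", do_lower_case=True)
# "id" is the integer-encoded label expected by the dataset class.
df = pd.DataFrame({"sentence": ["今天天气不错", "今天大盘三大指数全部收红"], "id": [0, 1]})
dataset = DataPrecessForCLF(tokenizer, df=df, max_char_len=64)
loader = DataLoader(dataset, shuffle=True, batch_size=2)
input_ids, attention_mask, token_type_ids, labels = next(iter(loader))
print(input_ids.shape)  # torch.Size([2, 64])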
utils.py is as follows:
# -*- coding: utf-8 -*-
import time

import torch
import torch.nn as nn
from tqdm import tqdm


def validate(model, dataloader):
    """Run one pass over the validation set and return (time, loss, accuracy)."""
    model.eval()
    device = model.device
    epoch_start = time.time()
    running_loss = 0.0
    correct_preds = 0
    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:
            seqs, masks, segments, labels = (batch_seqs.to(device), batch_seq_masks.to(device),
                                             batch_seq_segments.to(device), batch_labels.to(device))
            loss, pre_label, prob = model(seqs, masks, segments, labels)
            running_loss += loss.item()
            correct_preds += (pre_label == labels).sum().item()
    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = correct_preds / len(dataloader.dataset)
    return epoch_time, epoch_loss, epoch_accuracy


def test(model, dataloader, inference=False):
    """Evaluate on a test set, or return predictions and probabilities when inference=True."""
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    time_start = time.time()
    batch_time = 0.0
    correct_preds = 0
    all_prob = []
    all_pred_label = []
    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:
            batch_start = time.time()
            seqs, masks, segments, labels = (batch_seqs.to(device), batch_seq_masks.to(device),
                                             batch_seq_segments.to(device), batch_labels.to(device))
            loss, pre_label, prob = model(seqs, masks, segments, labels)
            correct_preds += (pre_label == labels).sum().item()
            batch_time += time.time() - batch_start
            all_prob.extend(prob.cpu().numpy())
            all_pred_label.extend(pre_label.cpu().numpy())
    batch_time /= len(dataloader)
    total_time = time.time() - time_start
    if inference:
        return all_pred_label, all_prob, total_time
    accuracy = correct_preds / len(dataloader.dataset)
    return batch_time, total_time, accuracy


def train(model, dataloader, optimizer, epoch_number, max_gradient_norm):
    """Train for one epoch and return (time, loss, accuracy)."""
    model.train()
    device = model.device
    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0
    tqdm_batch_iterator = tqdm(dataloader)
    for batch_index, (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in enumerate(tqdm_batch_iterator):
        batch_start = time.time()
        seqs, masks, segments, labels = (batch_seqs.to(device), batch_seq_masks.to(device),
                                         batch_seq_segments.to(device), batch_labels.to(device))
        optimizer.zero_grad()
        loss, pre_label, prob = model(seqs, masks, segments, labels)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
        optimizer.step()
        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        correct_preds += (pre_label == labels).sum().item()
        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}" \
            .format(batch_time_avg / (batch_index + 1), running_loss / (batch_index + 1))
        tqdm_batch_iterator.set_description(description)
    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = correct_preds / len(dataloader.dataset)
    return epoch_time, epoch_loss, epoch_accuracy
The training script train.py for training the model on real data:
# -*- coding: utf-8 -*-
import json
import os

import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from transformers.optimization import AdamW

from data import DataPrecessForCLF
from model import BertModel
from utils import train, validate

base_path = os.path.dirname(os.path.abspath(__file__))  # project root; adjust if your layout differs
le = LabelEncoder()


def main(train_file, target_dir, epochs=25, batch_size=16, lr=2e-05, patience=5,
         max_char_len=512, max_grad_norm=10.0, checkpoint=None):
    bert_tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", do_lower_case=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(20 * "=", " Preparing for training ", 20 * "=")
    # Directory where checkpoints and the label mapping are saved.
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # -------------------- Data loading -------------------- #
    data = pd.read_csv(train_file)
    data = data[["sentence", "label"]]
    data["sentence"] = data["sentence"].apply(lambda i: str(i))
    data["label"] = data["label"].apply(lambda i: str(i))
    data["id"] = le.fit_transform(data["label"])

    label = data[["id", "label"]].drop_duplicates()
    label_dict = {}
    for index, row in label.iterrows():
        label_dict[row["label"]] = int(row["id"])  # cast to int so json.dump can serialize it
    label_map = dict(sorted(label_dict.items(), key=lambda d: d[0]))
    label_num = len(label_map)
    with open(os.path.join(target_dir, "label2id.json"), "w", encoding="utf-8") as f:
        json.dump(label_map, f, ensure_ascii=False, indent=4)
    print("the classification label num is {}".format(label_num))

    # Stratified train/dev split.
    df_train, df_dev, y_train, y_dev = train_test_split(data, data["label"], test_size=0.2,
                                                        stratify=data["label"], random_state=666)
    df_train.reset_index(inplace=True, drop=True)
    df_dev.reset_index(inplace=True, drop=True)

    print("\t* Loading training data..., dataset size is {}".format(len(df_train)))
    train_data = DataPrecessForCLF(bert_tokenizer, df=df_train, max_char_len=max_char_len)
    train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)

    print("\t* Loading validation data..., dataset size is {}".format(len(df_dev)))
    dev_data = DataPrecessForCLF(bert_tokenizer, df=df_dev, max_char_len=max_char_len)
    dev_loader = DataLoader(dev_data, shuffle=True, batch_size=batch_size)

    # -------------------- Model definition -------------------- #
    print("\t* Building model...")
    model = BertModel(label_num).to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {
            'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        },
        {
            'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        }
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="max", factor=0.85, patience=0)

    best_score = 0.0
    start_epoch = 1
    # Data for loss curves plot.
    epochs_count = []
    train_losses = []
    valid_losses = []

    # Continuing training from a checkpoint if one was given as argument.
    if checkpoint:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint["epoch"] + 1
        best_score = checkpoint["best_score"]
        print("\t* Training will continue on existing model from epoch {}...".format(start_epoch))
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        epochs_count = checkpoint["epochs_count"]
        train_losses = checkpoint["train_losses"]
        valid_losses = checkpoint["valid_losses"]

    # Compute loss and accuracy before starting (or resuming) training.
    _, valid_loss, valid_accuracy = validate(model, dev_loader)
    print("\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%".format(valid_loss, (valid_accuracy * 100)))

    # -------------------- Training epochs -------------------- #
    print("\n", 20 * "=", "Training Bert model on device: {}".format(device), 20 * "=")
    patience_counter = 0
    for epoch in range(start_epoch, epochs + 1):
        epochs_count.append(epoch)
        print("* Training epoch {}:".format(epoch))
        epoch_time, epoch_loss, epoch_accuracy = train(model, train_loader, optimizer, epoch, max_grad_norm)
        train_losses.append(epoch_loss)
        print("-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%"
              .format(epoch_time, epoch_loss, (epoch_accuracy * 100)))

        print("* Validation for epoch {}:".format(epoch))
        epoch_time, epoch_loss, epoch_accuracy = validate(model, dev_loader)
        valid_losses.append(epoch_loss)
        print("-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n"
              .format(epoch_time, epoch_loss, (epoch_accuracy * 100)))

        # Update the optimizer's learning rate with the scheduler.
        scheduler.step(epoch_accuracy)

        # Early stopping on validation accuracy.
        if epoch_accuracy < best_score:
            patience_counter += 1
        else:
            best_score = epoch_accuracy
            patience_counter = 0
            torch.save({"epoch": epoch,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "best_score": best_score,
                        "epochs_count": epochs_count,
                        "train_losses": train_losses,
                        "valid_losses": valid_losses},
                       os.path.join(target_dir, "clf.pth.tar"))
        if patience_counter >= patience:
            print("-> Early stopping: patience limit reached, stopping...")
            break


if __name__ == "__main__":
    main(os.path.join(base_path, "model_data/train.csv"),
         os.path.join(base_path, "checkpoint"))
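Training can also be resumed from a saved checkpoint through the checkpoint argument of main; a small sketch, with placeholder paths to adjust to your own layout:

# Hypothetical resume call; adjust paths and hyperparameters to your setup.
main(train_file="model_data/train.csv",
     target_dir="checkpoint",
     epochs=10,
     batch_size=32,
     checkpoint="checkpoint/clf.pth.tar")  # produced by a previous run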
Evaluating the model on the test set with test.py:
# -*- coding: utf-8 -*-
import json
import os

import pandas as pd
import torch
from sklearn import metrics
from torch.utils.data import DataLoader
from transformers import BertTokenizer

from data import DataPrecessForCLF
from model import BertModel
from utils import test

base_path = os.path.dirname(os.path.abspath(__file__))  # project root; adjust if your layout differs

with open("checkpoint/label2id.json", "r", encoding="utf-8") as f:
    label2id = json.load(f)
id2label = {v: k for k, v in label2id.items()}


def get_label(key):
    return id2label.get(int(key))


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
bert_tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", do_lower_case=True)


def file_test_prf(test_file, pretrained_file, batch_size=8):
    checkpoint = torch.load(pretrained_file, map_location=device)
    df_test = pd.read_csv(test_file)
    df_test["label"] = df_test["label"].apply(lambda i: str(i))  # match the string labels used in training
    df_test["id"] = 0  # dummy labels; predictions are compared against the "label" column below
    inference_data = DataPrecessForCLF(bert_tokenizer, df=df_test, max_char_len=512)
    inference_loader = DataLoader(inference_data, shuffle=False, batch_size=batch_size)

    model = BertModel(num_labels=len(label2id)).to(device)
    model.load_state_dict(checkpoint["model"])

    max_index, max_prob, total_time = test(model, inference_loader, inference=True)
    all_pred_label = list(map(get_label, max_index))
    df_test["pred_label"] = all_pred_label
    df_test["prob"] = max_prob

    useful_label = list(label2id.keys())
    # The catch-all "其他" class comes from negative sampling and can be excluded from evaluation.
    if "其他" in useful_label:
        useful_label.remove("其他")
    print(metrics.classification_report(list(df_test["label"]), all_pred_label,
                                        labels=useful_label, digits=3))

    result_dir = os.path.join(base_path, "result_files")
    os.makedirs(result_dir, exist_ok=True)
    df_test.to_csv(os.path.join(result_dir, "test_result.csv"), index=False)


if __name__ == "__main__":
    file_test_prf(os.path.join(base_path, "model_data/test.csv"),
                  os.path.join(base_path, "checkpoint/clf.pth.tar"))
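For serving single queries instead of a whole CSV, the same pieces can be wired into a small prediction helper. A minimal sketch, assuming the checkpoint and label2id.json written by train.py; the function name and paths are illustrative:

# predict.py -- hypothetical single-sentence inference sketch
import json
import pandas as pd
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from data import DataPrecessForCLF
from model import BertModel
from utils import test


def predict(sentences, checkpoint_path="checkpoint/clf.pth.tar"):
    # Tokenizer and label mapping must match the ones used during training.
    tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", do_lower_case=True)
    with open("checkpoint/label2id.json", "r", encoding="utf-8") as f:
        label2id = json.load(f)
    id2label = {v: k for k, v in label2id.items()}

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = BertModel(num_labels=len(label2id)).to(device)
    state = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(state["model"])

    # Reuse the dataset class; "id" is a dummy label column at inference time.
    df = pd.DataFrame({"sentence": sentences, "id": 0})
    loader = DataLoader(DataPrecessForCLF(tokenizer, df=df, max_char_len=512),
                        shuffle=False, batch_size=8)
    pred_ids, probs, _ = test(model, loader, inference=True)
    return [(id2label[int(i)], float(p)) for i, p in zip(pred_ids, probs)]


if __name__ == "__main__":
    print(predict(["今天天气不错"]))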
train.csv and test.csv only need to contain a sentence column and a label column, for example:
sentence,label
今天天气不错,天气
今天大盘三大指数全部收红,股票
requirements.txt is as follows; the scripts above run without errors on Python 3.8:
torch==1.9.1
transformers==4.12.5
hanziconv==0.3.2
pandas==1.4.2
tqdm==4.64.0
scikit-learn==1.1.1