
PyTorch for Beginners (5): LSTM Text Classification


Task Background

Train a small-sample text classification model using an LSTM (Long Short-Term Memory) network.
The dataset and code are as follows:
LSTM text classification dataset + code + model
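
For reference, the data directory passed to the code below (the `dataset` argument) looks roughly like this. The layout is reconstructed from the paths used in Config and the comments in utils.py, so treat it as a sketch rather than an exact listing of the download:

THUCNews/
    data/
        train.txt                  # one sample per line: "<text>\t<label index>"
        dev.txt
        test.txt
        class.txt                  # one class name per line
        vocab.pkl                  # character vocabulary (built on first run if missing)
        embedding_SougouNews.npz   # pre-trained character embeddings, key "embeddings"
    saved_dict/                    # model checkpoints, e.g. TextRNN.ckpt
    log/                           # TensorBoard logs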

1. models/TextRNN.py

# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):
    """Configuration parameters."""

    def __init__(self, dataset, embedding):
        # All paths below are relative and can be used as-is.
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.txt'                                # training set (relative path)
        self.dev_path = dataset + '/data/dev.txt'                                    # validation set
        self.test_path = dataset + '/data/test.txt'                                  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]                                # read the list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'                                # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        # where the trained model is saved
        self.log_path = dataset + '/log/' + self.model_name                          # where training logs are saved

        # Load the pre-trained word vectors by key, cast to float32, then convert to a tensor; shape (4762, 300)
        # NpzFile '.../THUCNews/data/embedding_SougouNews.npz' with keys: embeddings
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None                                       # pre-trained embeddings

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # device: GPU if available, else CPU

        self.dropout = 0.5                                              # dropout rate
        self.require_improvement = 1000                                 # stop early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)                         # number of classes
        self.n_vocab = 0                                                # vocabulary size, assigned at runtime
        self.num_epochs = 10                                            # number of epochs
        self.batch_size = 128                                           # mini-batch size
        self.pad_size = 40                                              # sequence length (shorter sentences are padded, longer ones truncated)
        self.learning_rate = 1e-3                                       # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300           # character-embedding dimension; taken from the pre-trained vectors (300) when they are used
        self.hidden_size = 128                                          # LSTM hidden size
        self.num_layers = 3                                             # number of LSTM layers


'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''

# <bound method Module.parameters of Model(
#   (embedding): Embedding(4762, 300)
#   (lstm): LSTM(300, 128, num_layers=3, batch_first=True, dropout=0.5, bidirectional=True)
#   (fc): Linear(in_features=256, out_features=10, bias=True)
# )>
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        # Use the pre-trained embedding table if one was provided
        if config.embedding_pretrained is not None:
             # print(config.embedding_pretrained)
            # tensor([[0.2983, 0.4106, 0.8946, ..., 0.6417, 0.8806, 0.1683],
            #         [0.0010, -0.1334, -0.1902, ..., -0.1443, -0.5212, 0.2069],
            #         [-0.0249, 0.1308, -0.4010, ..., 0.3485, -0.5099, -0.1834],
            #         ...,
            #         [-0.2030, 0.1445, -0.0035, ..., -0.2927, -0.1555, 0.0662],
            #         [0.5052, 0.6515, 0.4099, ..., 0.6391, 0.2736, 0.7934],
            #         [0.2890, 0.8864, 0.6253, ..., 0.6731, 0.7833, 0.1340]])
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
            # print(self.embedding)
            # Embedding(4762, 300)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
            
        # (lstm): LSTM(300, 128, num_layers=3, batch_first=True, dropout=0.5, bidirectional=True)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        # bidirectional=True: the forward and backward hidden states (128 each) are concatenated into a 256-dim vector
        # batch_first=True: input shape is (batch, pad_size, embedding), i.e. batch size, sentence length, embedding dim;
        #                   output shape is (batch, pad_size, hidden_size * 2)

        # Output layer: (256, 10)
        # (fc): Linear(in_features=256, out_features=10, bias=True)
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        # print(x)
        # (tensor([[14, 125, 55, ..., 4760, 4760, 4760],
        #          [135, 80, 33, ..., 4760, 4760, 4760],
        #          [152, 13, 469, ..., 4760, 4760, 4760],
        #          ...,
        #          [160, 1667, 1147, ..., 4760, 4760, 4760],
        #          [31, 75, 4, ..., 4760, 4760, 4760],
        #          [321, 566, 130, ..., 4760, 4760, 4760]]),
        #  tensor([18, 22, 25, 25, 23, 20, 17, 22, 16, 11, 23, 23, 22, 15, 7, 23, 20, 25,
        #          15, 9, 17, 15, 24, 20, 17, 17, 13, 20, 19, 20, 22, 22, 21, 22, 23, 19,
        #          12, 20, 23, 18, 22, 25, 23, 20, 19, 17, 17, 15, 17, 26, 16, 22, 21, 18,
        #          16, 12, 23, 19, 20, 21, 12, 24, 18, 14, 25, 16, 24, 24, 23, 20, 20, 20,
        #          18, 16, 23, 14, 23, 21, 19, 17, 24, 21, 23, 23, 19, 15, 12, 22, 25, 14,
        #          21, 20, 22, 15, 22, 18, 16, 17, 13, 21, 21, 18, 21, 11, 19, 22, 14, 22,
        #          15, 22, 15, 22, 22, 15, 25, 16, 18, 18, 14, 19, 13, 29, 20, 18, 22, 16,
        #          18, 22]))
        x, _ = x  # x arrives as a tuple (token-ID tensor, sequence lengths); the lengths are not used here
        # print(x)
        # tensor([[14, 125, 55, ..., 4760, 4760, 4760],
        #          [135, 80, 33, ..., 4760, 4760, 4760],
        #          [152, 13, 469, ..., 4760, 4760, 4760],
        #          ...,
        #          [160, 1667, 1147, ..., 4760, 4760, 4760],
        #          [31, 75, 4, ..., 4760, 4760, 4760],
        #          [321, 566, 130, ..., 4760, 4760, 4760]])

        # Map each token ID to its embedding vector
        out = self.embedding(x)  # [batch_size, seq_len, embedding] = [128, 40, 300]
        # Run the sequence through the LSTM
        out, _ = self.lstm(out)  # out: torch.Size([128, 40, 256])
        # Keep only the output at the last time step
        out = self.fc(out[:, -1, :])  # hidden state at the sentence's final time step
        return out
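
As a quick, hedged sanity check of the shapes noted in the comments above (a hypothetical snippet, not part of the repository), the model can be built with a random embedding table and fed a fake batch; the import path models.TextRNN follows the import_module call in run.py:

# Hedged shape check: random embedding, fake batch (hypothetical snippet).
import torch
from models.TextRNN import Config, Model

config = Config('THUCNews', 'random')        # needs THUCNews/data/class.txt to exist
config.n_vocab = 4762                        # normally set in run.py from len(vocab)
model = Model(config)

x = torch.randint(0, config.n_vocab, (2, config.pad_size))   # fake token IDs, batch of 2
seq_len = torch.full((2,), config.pad_size)                  # lengths are carried but unused
logits = model((x, seq_len))
print(logits.shape)                          # torch.Size([2, 10]) for 10 classes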


2. THUCNews/run.py

import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
# Visualization (TensorBoard) support
from tensorboardX import SummaryWriter

# Command-line arguments
parser = argparse.ArgumentParser(description='Chinese Text Classification')
# Choose the model
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')
# Choose the pre-trained embedding table
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
# Word-level or character-level tokenization (note: with type=bool, any non-empty string passed on the CLI parses as True)
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
# e.g. model='TextRNN', embedding='pre_trained', word=False

if __name__ == '__main__':
    # Dataset directory (relative path)
    dataset = 'THUCNews'

    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random initialization: 'random'
    embedding = 'embedding_SougouNews.npz'
    if args.embedding == 'random':
        embedding = 'random'
    model_name = args.model  # TextCNN, TextRNN, ...
    if model_name == 'FastText':
        from utils_fasttext import build_dataset, build_iterator, get_time_dif
        embedding = 'random'
    else:
        from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)
    config = x.Config(dataset, embedding)
    # config: <models.TextRNN.Config object at 0x00000243F8850B80>
    # Fix random seeds so results are reproducible across runs
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make cuDNN deterministic for reproducibility

    start_time = time.time()
    print("Loading data...")

    # Convert the train/dev/test text into token-ID form: (token IDs, label, text length)
    # ([51, 112, 19, 31, 439, 726, 296, 1028, 0, 179, 51, 47, 62, 2070, 669, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760], 2, 15)
    vocab, train_data, dev_data, test_data = build_dataset(config, args.word)

    # Build a batch iterator for each split
    train_iter = build_iterator(train_data, config)
    # print(train_iter)
    # <utils.DatasetIterater object at 0x0000028AEE21DC40>
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    # Vocabulary size: 4762
    config.n_vocab = len(vocab)
    model = x.Model(config).to(config.device)
    writer = SummaryWriter(log_dir=config.log_path + '/' + time.strftime('%m-%d_%H.%M', time.localtime()))
    if model_name != 'Transformer':
        init_network(model)
    print(model.parameters)  
    # <bound method Module.parameters of Model(
    #   (embedding): Embedding(4762, 300)
    #   (lstm): LSTM(300, 128, num_layers=3, batch_first=True, dropout=0.5, bidirectional=True)
    #   (fc): Linear(in_features=256, out_features=10, bias=True)
    # )>
    train(config, model, train_iter, dev_iter, test_iter, writer)
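
Training is normally launched from the command line, for example python run.py --model TextRNN (the pre-trained Sogou embedding is the default). The following hedged sketch wires the same pieces together without argparse, assuming the data layout and module paths used above:

# Hedged sketch: the same flow as run.py for TextRNN, without argparse.
from importlib import import_module
from utils import build_dataset, build_iterator

module = import_module('models.TextRNN')
config = module.Config('THUCNews', 'embedding_SougouNews.npz')

vocab, train_data, dev_data, test_data = build_dataset(config, False)  # False = char-level
config.n_vocab = len(vocab)

train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)

model = module.Model(config).to(config.device)
# From here, init_network(model) and train(...) are called exactly as in run.py.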


3. THUCNews/train_eval.py

# coding: UTF-8
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn import metrics
import time
from utils import get_time_dif
from tensorboardX import SummaryWriter


# Weight initialization, Xavier by default
def init_network(model, method='xavier', exclude='embedding', seed=123):
    # print(model.named_parameters)
    # <bound method Module.named_parameters of Model(
    #   (embedding): Embedding(4762, 300)
    #   (lstm): LSTM(300, 128, num_layers=3, batch_first=True, dropout=0.5, bidirectional=True)
    #   (fc): Linear(in_features=256, out_features=10, bias=True)
    # )>
    for name, w in model.named_parameters():
        # print(name)
        # print(w.shape)
        # embedding.weight
        # torch.Size([4762, 300])
        # lstm.weight_ih_l0
        # torch.Size([512, 300])
        # lstm.weight_hh_l0
        # torch.Size([512, 128])
        # lstm.bias_ih_l0
        # torch.Size([512])
        # lstm.bias_hh_l0
        # torch.Size([512])
        # lstm.weight_ih_l0_reverse
        # torch.Size([512, 300])
        # lstm.weight_hh_l0_reverse
        # torch.Size([512, 128])
        # lstm.bias_ih_l0_reverse
        # torch.Size([512])
        # lstm.bias_hh_l0_reverse
        # torch.Size([512])
        # lstm.weight_ih_l1
        # torch.Size([512, 256])
        # lstm.weight_hh_l1
        # torch.Size([512, 128])
        # lstm.bias_ih_l1
        # torch.Size([512])
        # lstm.bias_hh_l1
        # torch.Size([512])
        # lstm.weight_ih_l1_reverse
        # torch.Size([512, 256])
        # lstm.weight_hh_l1_reverse
        # torch.Size([512, 128])
        # lstm.bias_ih_l1_reverse
        # torch.Size([512])
        # lstm.bias_hh_l1_reverse
        # torch.Size([512])
        # lstm.weight_ih_l2
        # torch.Size([512, 256])
        # lstm.weight_hh_l2
        # torch.Size([512, 128])
        # lstm.bias_ih_l2
        # torch.Size([512])
        # lstm.bias_hh_l2
        # torch.Size([512])
        # lstm.weight_ih_l2_reverse
        # torch.Size([512, 256])
        # lstm.weight_hh_l2_reverse
        # torch.Size([512, 128])
        # lstm.bias_ih_l2_reverse
        # torch.Size([512])
        # lstm.bias_hh_l2_reverse
        # torch.Size([512])
        # fc.weight
        # torch.Size([10, 256])
        # fc.bias
        # torch.Size([10])
        if exclude not in name:
            if 'weight' in name:
                if method == 'xavier':
                    nn.init.xavier_normal_(w)
                elif method == 'kaiming':
                    nn.init.kaiming_normal_(w)
                else:
                    nn.init.normal_(w)
            elif 'bias' in name:
                nn.init.constant_(w, 0)
            else:
                pass
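
To see what init_network does in isolation, here is a small hypothetical check on a toy module (not part of the repository): parameters whose names contain 'weight' get Xavier-normal initialization, biases are set to zero, and anything whose name contains 'embedding' is skipped.

# Toy check of init_network (hypothetical; run with the function above in scope).
import torch.nn as nn

toy = nn.Sequential(nn.Linear(4, 3))   # parameter names: '0.weight', '0.bias'
init_network(toy)
print(toy[0].bias)                     # Parameter containing: tensor([0., 0., 0.], requires_grad=True)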


def train(config, model, train_iter, dev_iter, test_iter, writer):
    start_time = time.time()
    # Training mode: dropout is active and normalization statistics are updated
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

    # Exponential learning-rate decay: after each epoch, lr = gamma * lr
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    total_batch = 0  # number of batches processed so far
    dev_best_loss = float('inf')
    last_improve = 0  # batch index at which the validation loss last improved
    flag = False  # whether training has gone too long without improvement
    #writer = SummaryWriter(log_dir=config.log_path + '/' + time.strftime('%m-%d_%H.%M', time.localtime()))
    for epoch in range(config.num_epochs):
        print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
        # scheduler.step()  # learning-rate decay
        for i, (trains, labels) in enumerate(train_iter):
            #print (trains[0].shape)
            # trains is a tuple: trains[0] holds the token IDs, trains[1] the sequence lengths
            outputs = model(trains)
            # Zero the gradients
            model.zero_grad()
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()
            if total_batch % 100 == 0:
                # Every 100 batches, report performance on the training and validation sets
                true = labels.data.cpu()
                predic = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)
                dev_acc, dev_loss = evaluate(config, model, dev_iter)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), config.save_path)
                    improve = '*'
                    last_improve = total_batch
                else:
                    improve = ''
                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6},  Train Loss: {1:>5.2},  Train Acc: {2:>6.2%},  Val Loss: {3:>5.2},  Val Acc: {4:>6.2%},  Time: {5} {6}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))
                writer.add_scalar("loss/train", loss.item(), total_batch)
                writer.add_scalar("loss/dev", dev_loss, total_batch)
                writer.add_scalar("acc/train", train_acc, total_batch)
                writer.add_scalar("acc/dev", dev_acc, total_batch)
                model.train()
            total_batch += 1
            if total_batch - last_improve > config.require_improvement:
                # Validation loss has not improved for more than 1000 batches; stop training
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    writer.close()
    test(config, model, test_iter)


def test(config, model, test_iter):
    # test
    model.load_state_dict(torch.load(config.save_path))
    # Evaluation mode: dropout is disabled and normalization layers use their learned statistics
    model.eval()
    start_time = time.time()
    test_acc, test_loss, test_report, test_confusion = evaluate(config, model, test_iter, test=True)
    msg = 'Test Loss: {0:>5.2},  Test Acc: {1:>6.2%}'
    print(msg.format(test_loss, test_acc))
    print("Precision, Recall and F1-Score...")
    print(test_report)
    print("Confusion Matrix...")
    print(test_confusion)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)


def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)

    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter)

4. THUCNews/utils.py

# coding: UTF-8
import os
import torch
import numpy as np
import pickle as pkl
from tqdm import tqdm
import time
from datetime import timedelta


MAX_VOCAB_SIZE = 10000  # vocabulary size limit
# UNK is the unknown token: characters not in the vocabulary are mapped to UNK;
# sentences shorter than pad_size are padded with PAD.
# If the vocabulary proper uses IDs up to 4759, then 4760 is the shared ID for all
# out-of-vocabulary characters (UNK) and 4761 is the PAD ID; without UNK, PAD would
# take 4760 (my own understanding, not necessarily correct).
UNK, PAD = '<UNK>', '<PAD>'  # unknown token, padding token


def build_vocab(file_path, tokenizer, max_size, min_freq):
    vocab_dic = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue
            content = lin.split('\t')[0]
            for word in tokenizer(content):
                vocab_dic[word] = vocab_dic.get(word, 0) + 1
        vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[:max_size]
        vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
        vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    return vocab_dic
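
A small, hypothetical illustration of what build_vocab returns (run with the names above in scope, e.g. appended to utils.py): characters are ranked by frequency and mapped to consecutive IDs, with UNK and PAD appended as the last two entries.

# Hedged sketch: build a tiny char-level vocabulary from a throwaway file.
toy_path = 'toy_train.txt'
with open(toy_path, 'w', encoding='UTF-8') as f:
    f.write('中华女子学院:本科层次仅1专业招男生\t3\n')   # same line format as train.txt

char_tokenizer = lambda x: [y for y in x]
toy_vocab = build_vocab(toy_path, char_tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
print(len(toy_vocab))                  # number of distinct characters + 2 (UNK and PAD)
print(toy_vocab[UNK], toy_vocab[PAD])  # the two largest IDs in the vocabulary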


def build_dataset(config, ues_word):
    # Decide between word-level and character-level tokenization
    if ues_word:
        tokenizer = lambda x: x.split(' ')  # split on spaces, word-level
    else:
        tokenizer = lambda x: [y for y in x]  # char-level
    # Load the vocabulary (build it from the training set if it does not exist yet)
    if os.path.exists(config.vocab_path):
        vocab = pkl.load(open(config.vocab_path, 'rb'))
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(vocab, open(config.vocab_path, 'wb'))
    print(f"Vocab size: {len(vocab)}")

    def load_dataset(path, pad_size=32):
        contents = []
        with open(path, 'r', encoding='UTF-8') as f:
            for line in tqdm(f):
                # line: '中华女子学院:本科层次仅1专业招男生\t3\n'
                # strip the trailing newline
                lin = line.strip()
                # lin: '中华女子学院:本科层次仅1专业招男生\t3'
                if not lin:
                    continue
                # split on the tab character to separate the sentence from its label
                # content: '中华女子学院:本科层次仅1专业招男生'     label: '3'
                content, label = lin.split('\t')
                words_line = []
                # tokenize the sentence by character; shorter sentences are padded to pad_size (padding ID here is 4761)
                token = tokenizer(content)
                # ['中', '华', '女', '子', '学', '院', ':', '本', '科', '层', '次', '仅', '1', '专', '业', '招', '男', '生', 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761, 4761]
                seq_len = len(token)
                if pad_size:
                    # sentence shorter than pad_size: pad with PAD
                    if len(token) < pad_size:
                        token.extend([vocab.get(PAD)] * (pad_size - len(token)))
                    else:
                        # sentence longer than pad_size: truncate the excess
                        token = token[:pad_size]
                        seq_len = pad_size
                # word to ID: map each character to its vocabulary ID
                for word in token:
                    # characters not in the vocabulary fall back to UNK
                    words_line.append(vocab.get(word, vocab.get(UNK)))
                contents.append((words_line, int(label), seq_len))
        # (token-ID list, label, length), e.g. contents: [([14, 125, 55, 45, 35, 307, 4, 81, 161, 941, 258, 494, 2, 175, 48, 145, 97, 17, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760, 4760], 3, 18)]
        return contents  # [([...], 0), ([...], 1), ...]
    train = load_dataset(config.train_path, config.pad_size)
    dev = load_dataset(config.dev_path, config.pad_size)
    test = load_dataset(config.test_path, config.pad_size)
    return vocab, train, dev, test


class DatasetIterater(object):
    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size    # 128
        self.batches = batches      # e.g. 180000 samples for the training set
        self.n_batches = len(batches) // batch_size     # number of full batches
        self.residue = False  # whether there is a leftover partial batch
        # Check whether the number of samples is an exact multiple of the batch size
        if len(batches) % self.batch_size != 0:
            self.residue = True
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)

        # length before padding (capped at pad_size)
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        return (x, seq_len), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

        elif self.index > self.n_batches:
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

    def __iter__(self):
        return self

    def __len__(self):
        if self.residue:
            return self.n_batches + 1
        else:
            return self.n_batches


def build_iterator(dataset, config):
    iter = DatasetIterater(dataset, config.batch_size, config.device)
    return iter
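
Each item yielded by the iterator has exactly the shape Model.forward expects: a ((token IDs, sequence lengths), labels) tuple. Below is a hedged sketch of peeking at one batch, assuming config and train_data were built as in run.py:

# Hedged sketch: inspect one batch from the iterator.
train_iter = build_iterator(train_data, config)
(x, seq_len), y = next(iter(train_iter))
print(x.shape)        # torch.Size([128, 40])  -> (batch_size, pad_size)
print(seq_len.shape)  # torch.Size([128])      -> lengths before padding
print(y.shape)        # torch.Size([128])      -> integer class labels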


def get_time_dif(start_time):
    """获取已使用时间"""
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))


if __name__ == "__main__":
    '''Extract pre-trained word vectors'''
    # Adjust the directories and file names below as needed.
    train_dir = "./THUCNews/data/train.txt"
    vocab_dir = "./THUCNews/data/vocab.pkl"
    pretrain_dir = "./THUCNews/data/sgns.sogou.char"
    emb_dim = 300
    filename_trimmed_dir = "./THUCNews/data/embedding_SougouNews"
    if os.path.exists(vocab_dir):
        word_to_id = pkl.load(open(vocab_dir, 'rb'))
    else:
        # tokenizer = lambda x: x.split(' ')  # build a word-level vocabulary (words separated by spaces in the dataset)
        tokenizer = lambda x: [y for y in x]  # build a character-level vocabulary
        word_to_id = build_vocab(train_dir, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(word_to_id, open(vocab_dir, 'wb'))

    embeddings = np.random.rand(len(word_to_id), emb_dim)
    f = open(pretrain_dir, "r", encoding='UTF-8')
    for i, line in enumerate(f.readlines()):
        # if i == 0:  # skip the first line if it is a header
        #     continue
        lin = line.strip().split(" ")
        if lin[0] in word_to_id:
            idx = word_to_id[lin[0]]
            emb = [float(x) for x in lin[1:301]]
            embeddings[idx] = np.asarray(emb, dtype='float32')
    f.close()
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
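
After this script has been run once, the trimmed embedding matrix can be verified by loading the .npz file back with the same key Config uses:

# Hedged check of the saved file (same key as in Config above).
import numpy as np
emb = np.load('./THUCNews/data/embedding_SougouNews.npz')["embeddings"]
print(emb.shape)   # (len(word_to_id), 300), e.g. (4762, 300)
print(emb.dtype)   # float64 here; Config casts it to float32 before building the tensor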


Classification results:
[Training-process screenshots 1–5 omitted]
Training was cut short at the start of the 6th epoch because there had been no improvement on the validation set for 1,000 batches; the best model was obtained during the 5th epoch, and later iterations showed overfitting.
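
Once training has finished and THUCNews/saved_dict/TextRNN.ckpt has been written, a single headline can be classified with a short script. This is a hedged sketch, not part of the repository, that mirrors the preprocessing in utils.load_dataset:

# Hedged inference sketch: load the trained checkpoint and classify one headline.
import pickle as pkl
import torch
from importlib import import_module

module = import_module('models.TextRNN')
config = module.Config('THUCNews', 'embedding_SougouNews.npz')
vocab = pkl.load(open(config.vocab_path, 'rb'))
config.n_vocab = len(vocab)

model = module.Model(config).to(config.device)
model.load_state_dict(torch.load(config.save_path, map_location=config.device))
model.eval()

text = '中华女子学院:本科层次仅1专业招男生'              # example headline from the dataset
tokens = [c for c in text][:config.pad_size]              # char-level, truncate to pad_size
seq_len = len(tokens)
ids = [vocab.get(c, vocab.get('<UNK>')) for c in tokens]
ids += [vocab.get('<PAD>')] * (config.pad_size - len(ids))   # pad to pad_size

x = torch.LongTensor([ids]).to(config.device)
lengths = torch.LongTensor([seq_len]).to(config.device)
with torch.no_grad():
    logits = model((x, lengths))
print(config.class_list[int(logits.argmax(dim=1))])       # predicted class name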
