
Text Classification (2) | (4) Defining the Models and Their Configurations

Full project

This post walks through the module definition of each model: the model class itself and its configuration (hyperparameter) class, both of which live in the same module file. The sketch right below shows how such a module file is typically loaded at runtime.
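
Before diving into the individual models, it may help to see how one of these module files is usually consumed by a training script. The following is only a minimal sketch under assumptions: the module path models.FastText, the dataset directory name and the embedding file name are illustrative placeholders, not necessarily what the full project uses.

    from importlib import import_module

    # Hypothetical names, for illustration only.
    model_name = 'FastText'
    dataset = 'THUCNews'                      # assumed dataset directory
    embedding = 'embedding_SougouNews.npz'    # assumed pretrained-embedding file ('random' to skip)

    x = import_module('models.' + model_name)  # each model lives in its own module file
    config = x.Config(dataset, embedding)      # hyperparameters and data/checkpoint paths
    model = x.Model(config).to(config.device)  # build the network from its config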

Contents

1. FastText

2. TextCNN

3. TextRNN

4. TextRCNN

5. TextRNN_Atten

6. DPCNN

7. Transformer


1. FastText

  • Configuration class

class Config(object):
    """FastText configuration"""
    def __init__(self, dataset, embedding):
        self.model_name = 'FastText'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 20                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # word/character embedding dimension
        self.hidden_size = 256                    # hidden layer size
        self.n_gram_vocab = 250499                # size of the n-gram vocabulary
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # randomly initialized embedding matrices for bi-grams and tri-grams
        self.embedding_ngram2 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.embedding_ngram3 = nn.Embedding(config.n_gram_vocab, config.embed)
        # dropout
        self.dropout = nn.Dropout(config.dropout)
        # hidden layer
        self.fc1 = nn.Linear(config.embed * 3, config.hidden_size)
        # self.dropout2 = nn.Dropout(config.dropout)
        # output layer
        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        # x = (uni-gram indices, seq_len, bi-gram indices, tri-gram indices)
        # look up each kind of index in its own embedding matrix
        # (batch, seq_len, embed)
        out_word = self.embedding(x[0])
        out_bigram = self.embedding_ngram2(x[2])
        out_trigram = self.embedding_ngram3(x[3])
        # concatenate the three embeddings: (batch, seq_len, embed * 3)
        out = torch.cat((out_word, out_bigram, out_trigram), -1)
        # average over the length dimension: (batch, embed * 3)
        out = out.mean(dim=1)
        # dropout
        out = self.dropout(out)
        # hidden layer: (batch, hidden_size)
        out = self.fc1(out)
        out = F.relu(out)
        # output layer: (batch, num_classes)
        out = self.fc2(out)
        return out
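
The bi-gram and tri-gram indices in x[2] and x[3] have to be produced by the data loader before they reach the model. That loader is not shown in this post, so the following is only a hedged sketch of the usual hashing-trick approach: each pair or triple of neighbouring token ids is hashed into a fixed-size n-gram vocabulary of n_gram_vocab buckets. The function names and hash constants here are illustrative assumptions, not necessarily the project's exact code.

    def bigram_hash(sequence, t, buckets):
        """Hash the (previous token, current token) pair at position t into [0, buckets)."""
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        return (t1 * 14918087) % buckets

    def trigram_hash(sequence, t, buckets):
        """Hash the last three tokens ending at position t into [0, buckets)."""
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        t2 = sequence[t - 2] if t - 2 >= 0 else 0
        return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets

    token_ids = [5, 12, 7, 3]       # padded uni-gram indices of one sentence (what ends up in x[0])
    buckets = 250499                # config.n_gram_vocab
    bigrams = [bigram_hash(token_ids, i, buckets) for i in range(len(token_ids))]
    trigrams = [trigram_hash(token_ids, i, buckets) for i in range(len(token_ids))]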

 

2. TextCNN

  • Configuration class

class Config(object):
    """TextCNN configuration"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextCNN'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 20                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # word/character embedding dimension
        self.filter_sizes = (2, 3, 4)             # kernel sizes of the different convolutions
        self.num_filters = 256                    # number of filters per kernel size
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # one convolution per kernel size
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)             # (batch, num_filters, height)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # (batch, num_filters), global max pooling
        return x

    def forward(self, x):
        out = self.embedding(x[0])  # (batch, seq_len) -> (batch, seq_len, embed)
        out = out.unsqueeze(1)      # add a channel dimension: (batch, 1, seq_len, embed)
        # extract features with kernels of different sizes and concatenate the pooled results:
        # (batch, num_filters * len(filter_sizes))
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)          # (batch, num_classes)
        return out
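
To make the shape bookkeeping in conv_and_pool concrete, here is a small self-contained sketch with made-up sizes (batch 2, seq_len 32, embed 300, 256 filters, kernel height 3); the numbers are purely illustrative.

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 1, 32, 300)             # (batch, channel=1, seq_len, embed)
    conv = torch.nn.Conv2d(1, 256, (3, 300))   # kernel spans the full embedding width
    h = F.relu(conv(x)).squeeze(3)             # (2, 256, 30): the width collapses to 1 and is squeezed away
    p = F.max_pool1d(h, h.size(2)).squeeze(2)  # (2, 256): global max over the remaining time axis
    print(h.shape, p.shape)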

3. TextRNN

  • Configuration class

class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 10                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; fixed by the pretrained vectors if they are used
        self.hidden_size = 128                    # number of LSTM hidden units
        self.num_layers = 2                       # number of LSTM layers
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # 2-layer bidirectional LSTM, batch dimension first
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        # output layer
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        x, _ = x                      # (batch, seq_len)
        out = self.embedding(x)       # (batch, seq_len, embed) = (128, 32, 300)
        out, _ = self.lstm(out)       # (batch, seq_len, hidden_size * 2)
        out = self.fc(out[:, -1, :])  # hidden state at the last time step: (batch, hidden_size * 2) -> (batch, num_classes)
        return out

    '''Variable-length RNN: the accuracy is about the same, if anything slightly lower...'''
    # def forward(self, x):
    #     x, seq_len = x
    #     out = self.embedding(x)
    #     _, idx_sort = torch.sort(seq_len, dim=0, descending=True)  # indices that sort the lengths from long to short
    #     _, idx_unsort = torch.sort(idx_sort)                       # indices that restore the original order
    #     out = torch.index_select(out, 0, idx_sort)
    #     seq_len = list(seq_len[idx_sort])
    #     out = nn.utils.rnn.pack_padded_sequence(out, seq_len, batch_first=True)
    #     # (batch_size, seq_len, num_directions * hidden_size)
    #     out, (hn, _) = self.lstm(out)
    #     out = torch.cat((hn[2], hn[3]), -1)
    #     # out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
    #     out = out.index_select(0, idx_unsort)
    #     out = self.fc(out)
    #     return out
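
The commented-out variable-length forward sorts the batch by length manually because older versions of pack_padded_sequence required sorted input. Below is a hedged sketch of the same idea with a newer PyTorch, which accepts enforce_sorted=False; all sizes are made up and only illustrate the shapes.

    import torch
    import torch.nn as nn

    emb = torch.randn(4, 32, 300)            # (batch, pad_size, embed), made-up sizes
    seq_len = torch.tensor([32, 20, 15, 8])  # true lengths before padding
    lstm = nn.LSTM(300, 128, num_layers=2, bidirectional=True, batch_first=True)

    packed = nn.utils.rnn.pack_padded_sequence(
        emb, seq_len, batch_first=True, enforce_sorted=False)  # no manual sorting needed
    _, (hn, _) = lstm(packed)
    # hn: (num_layers * 2, batch, hidden); the last layer's forward/backward states are hn[-2] and hn[-1]
    feat = torch.cat((hn[-2], hn[-1]), dim=-1)  # (batch, hidden * 2)
    print(feat.shape)                           # torch.Size([4, 256])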

4. TextRCNN

  • Configuration class

class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRCNN'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 1.0                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 10                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; fixed by the pretrained vectors if they are used
        self.hidden_size = 256                    # number of LSTM hidden units
        self.num_layers = 1                       # number of LSTM layers
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # single-layer bidirectional LSTM, batch dimension first
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.maxpool = nn.MaxPool1d(config.pad_size)  # global max pooling over the sequence length
        # output layer
        self.fc = nn.Linear(config.hidden_size * 2 + config.embed, config.num_classes)

    def forward(self, x):
        x, _ = x                          # (batch, seq_len)
        embed = self.embedding(x)         # (batch, seq_len, embed) = (64, 32, 64)
        out, _ = self.lstm(embed)         # (batch, seq_len, hidden_size * 2)
        out = torch.cat((embed, out), 2)  # concatenate the embeddings with the LSTM outputs: (batch, seq_len, embed + hidden_size * 2)
        out = F.relu(out)
        out = out.permute(0, 2, 1)        # (batch, embed + hidden_size * 2, seq_len)
        out = self.maxpool(out).squeeze() # (batch, embed + hidden_size * 2)
        out = self.fc(out)                # (batch, num_classes)
        return out
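
Note that nn.MaxPool1d(config.pad_size) only amounts to a true global max pool because every sequence has already been padded or truncated to exactly pad_size. If the length could vary, an adaptive pool gives the same result without hard-coding it; a small hedged sketch with made-up sizes:

    import torch
    import torch.nn.functional as F

    out = torch.randn(2, 812, 32)                      # (batch, embed + hidden_size*2 = 300 + 512, seq_len)
    pooled = F.adaptive_max_pool1d(out, 1).squeeze(2)  # (2, 812), independent of the sequence length
    print(pooled.shape)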

 

5. TextRNN_Atten

  • Configuration class

class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN_Att'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 10                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; fixed by the pretrained vectors if they are used
        self.hidden_size = 128                    # number of LSTM hidden units
        self.num_layers = 2                       # number of LSTM layers
        self.hidden_size2 = 64                    # number of hidden units in the fully connected layer
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # 2-layer bidirectional LSTM, batch dimension first
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh1 = nn.Tanh()
        # self.u = nn.Parameter(torch.Tensor(config.hidden_size * 2, config.hidden_size * 2))
        # a learnable parameter vector used as the query
        self.w = nn.Parameter(torch.Tensor(config.hidden_size * 2))
        self.tanh2 = nn.Tanh()
        # hidden layer
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size2)
        # output layer
        self.fc = nn.Linear(config.hidden_size2, config.num_classes)

    def forward(self, x):
        x, _ = x                 # (batch, seq_len)
        emb = self.embedding(x)  # (batch, seq_len, embed) = (128, 32, 300)
        H, _ = self.lstm(emb)    # (batch, seq_len, hidden_size * num_directions) = (128, 32, 256); the hidden states act as the values
        M = self.tanh1(H)        # (128, 32, 256); the tanh-activated hidden states act as the keys
        # M = torch.tanh(torch.matmul(H, self.u))
        # score the keys against the query, then softmax to get one weight per time step
        alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)  # (128, 32, 1)
        # multiply each hidden state (value) by its weight
        out = H * alpha          # (128, 32, 256)
        # and sum over time
        out = torch.sum(out, 1)  # (128, 256) = (batch, hidden_size * 2)
        out = F.relu(out)
        out = self.fc1(out)      # (batch, hidden_size2)
        out = self.fc(out)       # (batch, num_classes)
        return out
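
A tiny, self-contained run of the attention step with made-up sizes (batch 2, seq_len 5, hidden_size*2 = 8) shows that the weights along the time axis sum to 1 and that the weighted sum collapses the sequence into a single vector per example:

    import torch
    import torch.nn.functional as F

    H = torch.randn(2, 5, 8)                                    # values: BiLSTM hidden states
    M = torch.tanh(H)                                           # keys
    w = torch.randn(8)                                          # learned query vector
    alpha = F.softmax(torch.matmul(M, w), dim=1).unsqueeze(-1)  # (2, 5, 1)
    ctx = (H * alpha).sum(dim=1)                                # (2, 8)
    print(alpha.squeeze(-1).sum(dim=1), ctx.shape)              # ≈ tensor([1., 1.]), torch.Size([2, 8])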

 

6. DPCNN

  • Configuration class

class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'DPCNN'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 1000           # stop training early if there is no improvement for 1000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 20                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # word/character embedding dimension
        self.num_filters = 250                    # number of convolution filters (channels)
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # region embedding, similar to the convolution in TextCNN
        self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.embed), stride=1)
        self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1)
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.padding1 = nn.ZeroPad2d((0, 0, 1, 1))  # one zero row at the top and one at the bottom
        self.padding2 = nn.ZeroPad2d((0, 0, 0, 1))  # one zero row at the bottom
        self.relu = nn.ReLU()
        self.fc = nn.Linear(config.num_filters, config.num_classes)

    def forward(self, x):
        x = x[0]                 # (batch, seq_len)
        x = self.embedding(x)    # (batch, seq_len, embed)
        x = x.unsqueeze(1)       # add a channel dimension for the 2d convolutions: (batch, 1, seq_len, embed)
        x = self.conv_region(x)  # (batch, num_filters, seq_len - 3 + 1, 1)
        # convolve, then pad: equivalent to a "same-length" convolution, so the sequence length is preserved
        x = self.padding1(x)     # (batch, num_filters, seq_len, 1)
        x = self.relu(x)
        x = self.conv(x)         # (batch, num_filters, seq_len - 3 + 1, 1)
        x = self.padding1(x)     # (batch, num_filters, seq_len, 1)
        x = self.relu(x)
        x = self.conv(x)         # (batch, num_filters, seq_len - 3 + 1, 1)
        while x.size()[2] > 2:
            x = self._block(x)
        x = x.squeeze()          # (batch, num_filters)
        x = self.fc(x)           # (batch, num_classes)
        return x

    def _block(self, x):
        x = self.padding2(x)     # (batch, num_filters, seq_len + 1, 1)
        # pooling roughly halves the length
        px = self.max_pool(x)    # (batch, num_filters, (seq_len - 1) / 2, 1)
        # "same-length" convolution: the length is preserved
        x = self.padding1(px)
        x = F.relu(x)
        x = self.conv(x)
        # "same-length" convolution: the length is preserved
        x = self.padding1(x)
        x = F.relu(x)
        x = self.conv(x)
        # shortcut connection
        x = x + px
        return x
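
How quickly the while loop terminates depends only on the pooling arithmetic. With pad_size = 32, the region convolution plus the two padded convolutions leave a length of 30, and each _block then shrinks it as traced below (the padded convolutions inside _block keep the length unchanged, so only the pool matters). The trace assumes the config above.

    # padding2 adds one row, then MaxPool2d(kernel_size=(3, 1), stride=2) roughly halves the length
    L = 30                        # length entering the while loop when pad_size = 32
    while L > 2:
        L = (L + 1 - 3) // 2 + 1
        print(L)                  # 15, 7, 3, 1 -> the loop exits and x is (batch, num_filters, 1, 1)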

7. Transformer

  • Configuration class

class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'Transformer'
        # paths to the training, validation and test sets
        self.train_path = dataset + '/data/train.txt'
        self.dev_path = dataset + '/data/dev.txt'
        self.test_path = dataset + '/data/test.txt'
        # all classes in the dataset
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt').readlines()]
        # path to the prebuilt word/character vocabulary
        self.vocab_path = dataset + '/data/vocab.pkl'
        # where the trained model parameters are saved
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'
        # where the training logs are saved
        self.log_path = dataset + '/log/' + self.model_name
        # if the embeddings are not randomly initialized, load the pretrained word/character
        # embedding matrix as a float32 tensor; otherwise None
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                        # dropout rate
        self.require_improvement = 2000           # stop training early if there is no improvement for 2000 batches
        self.num_classes = len(self.class_list)   # number of classes
        self.n_vocab = 0                          # vocabulary size, assigned at runtime
        self.num_epochs = 20                      # number of epochs
        self.batch_size = 128                     # mini-batch size
        self.pad_size = 32                        # every sentence is padded/truncated to this length
        self.learning_rate = 5e-4                 # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # word/character embedding dimension
        self.dim_model = 300
        self.hidden = 1024
        self.last_hidden = 512
        self.num_head = 5                         # 5 attention heads
        self.num_encoder = 2                      # two Transformer encoder blocks
  • Model definition class

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        # word/character embedding
        if config.embedding_pretrained is not None:  # load the pretrained word/character embeddings and fine-tune them
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:  # otherwise randomly initialize the embedding matrix; the last index is used for padding
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        # positional encoding
        self.postion_embedding = Positional_Encoding(config.embed, config.pad_size, config.dropout, config.device)
        # one Transformer encoder block
        self.encoder = Encoder(config.dim_model, config.num_head, config.hidden, config.dropout)
        # a stack of Transformer encoder blocks
        self.encoders = nn.ModuleList([
            copy.deepcopy(self.encoder)
            # Encoder(config.dim_model, config.num_head, config.hidden, config.dropout)
            for _ in range(config.num_encoder)])
        # output layer
        self.fc1 = nn.Linear(config.pad_size * config.dim_model, config.num_classes)
        # self.fc2 = nn.Linear(config.last_hidden, config.num_classes)
        # self.fc1 = nn.Linear(config.dim_model, config.num_classes)

    def forward(self, x):
        out = self.embedding(x[0])         # (batch, seq_len) -> (batch, seq_len, embed)
        out = self.postion_embedding(out)  # add the positional encoding: (batch, seq_len, embed)
        for encoder in self.encoders:      # pass through the stacked encoder blocks
            out = encoder(out)             # (batch, seq_len, dim_model)
        out = out.view(out.size(0), -1)    # (batch, seq_len * dim_model)
        # out = torch.mean(out, 1)
        out = self.fc1(out)                # (batch, num_classes)
        return out


class Encoder(nn.Module):
    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        # multi-head attention
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        # two fully connected layers
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):  # x: (batch, seq_len, embed_size), where embed_size = dim_model
        out = self.attention(x)       # multi-head attention: (batch, seq_len, dim_model)
        out = self.feed_forward(out)  # two fully connected layers add non-linear capacity: (batch, seq_len, dim_model)
        return out


class Positional_Encoding(nn.Module):
    # positional encoding
    def __init__(self, embed, pad_size, dropout, device):
        super(Positional_Encoding, self).__init__()
        self.device = device
        # absolute positional encodings built from sin and cos
        self.pe = torch.tensor([[pos / (10000.0 ** (i // 2 * 2.0 / embed)) for i in range(embed)] for pos in range(pad_size)])
        self.pe[:, 0::2] = np.sin(self.pe[:, 0::2])
        self.pe[:, 1::2] = np.cos(self.pe[:, 1::2])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # token embedding + absolute positional encoding
        out = x + nn.Parameter(self.pe, requires_grad=False).to(self.device)
        # followed by dropout
        out = self.dropout(out)
        return out


class Scaled_Dot_Product_Attention(nn.Module):
    '''Scaled Dot-Product Attention'''
    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        '''
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses the square root of dim_K
        Return:
            the context tensor after self-attention
        '''
        # dot product of Q with K transposed on its last two dimensions: (batch * num_head, seq_len, seq_len)
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:  # scale the scores to reduce their variance
            attention = attention * scale
        # if mask:  # TODO change this
        #     attention = attention.masked_fill_(mask == 0, -1e9)
        attention = F.softmax(attention, dim=-1)  # turn the scores into weights
        context = torch.matmul(attention, V)      # combine with V: (batch * num_head, seq_len, dim_head)
        return context


class Multi_Head_Attention(nn.Module):
    # multi-head attention, the first half of an encoder block
    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head          # number of heads
        assert dim_model % num_head == 0  # dim_model must be divisible by num_head
        self.dim_head = dim_model // self.num_head
        # three dense layers produce Q, K and V
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        # attention computation
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        # layer normalization
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):  # (batch, seq_len, dim_model)
        batch_size = x.size(0)
        # Q, K, V: (batch, seq_len, dim_head * num_head)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        # split the last dimension into num_head parts and stack them along the first dimension
        # so that all heads are computed in parallel
        # Q, K, V: (batch * num_head, seq_len, dim_head)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        # if mask:  # TODO
        #     mask = mask.repeat(self.num_head, 1, 1)  # TODO change this
        scale = K.size(-1) ** -0.5  # scaling factor: 1 / sqrt(dim_head); scaling the dot products reduces their variance and helps training
        # attention, with all heads computed in parallel as one matrix operation
        context = self.attention(Q, K, V, scale)
        # undo the split: back to (batch, seq_len, dim_head * num_head)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)  # (batch, seq_len, dim_model)
        out = self.dropout(out)
        out = out + x           # residual connection
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):
    # the second half of an encoder block
    def __init__(self, dim_model, hidden, dropout=0.0):
        # two fully connected layers that add non-linearity on top of the attention output
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):  # (batch, seq_len, dim_model)
        out = self.fc1(x)           # (batch, seq_len, hidden)
        out = F.relu(out)
        out = self.fc2(out)         # (batch, seq_len, dim_model)
        out = self.dropout(out)
        out = out + x               # residual connection
        out = self.layer_norm(out)  # layer normalization
        return out
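
To see what the Positional_Encoding table actually contains, here is a tiny standalone sketch with made-up sizes (4 positions, 6 embedding dimensions). It uses torch.sin/torch.cos in place of the np.sin/np.cos calls above, which gives the same values.

    import torch

    pad_size, embed = 4, 6  # made-up sizes
    pe = torch.tensor([[pos / (10000.0 ** (i // 2 * 2.0 / embed)) for i in range(embed)]
                       for pos in range(pad_size)])
    pe[:, 0::2] = torch.sin(pe[:, 0::2])  # even dimensions use sin
    pe[:, 1::2] = torch.cos(pe[:, 1::2])  # odd dimensions use cos
    print(pe.shape)  # torch.Size([4, 6]); the table is broadcast over the batch when added to the embeddings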

 

 
