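This snippet implements the sinusoidal positional encoding from *Attention Is All You Need* (Vaswani et al., 2017):

PE(pos, 2i) = sin(pos / 10000^(2i / d_model)),  PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model))

Each even/odd pair of embedding dimensions shares one frequency, and the resulting constant table is added to the (scaled) word embeddings.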
```python
import math

import torch
import torch.nn as nn


class PositionalEncoder(nn.Module):
    def __init__(self, d_model, max_seq_len=80):
        super().__init__()
        self.d_model = d_model
        # Build a constant PE matrix from pos and i
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                # i already steps over the even indices, so the exponent is i / d_model;
                # the sin/cos pair at each position shares the same frequency
                pe[pos, i] = math.sin(pos / (10000 ** (i / d_model)))
                pe[pos, i + 1] = math.cos(pos / (10000 ** (i / d_model)))
        pe = pe.unsqueeze(0)  # (1, max_seq_len, d_model): prepend a batch dimension
        # A buffer moves with the module (.to / .cuda) but is never trained
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Scale the word embeddings up so they are not swamped by the encoding
        x = x * math.sqrt(self.d_model)
        # Add the positional constants to the embeddings; the buffer requires
        # no grad, so no deprecated Variable wrapper is needed
        seq_len = x.size(1)
        x = x + self.pe[:, :seq_len]
        return x


pos_en = PositionalEncoder(d_model=64, max_seq_len=100)
x = torch.randn(1, 100, 64)   # (batch, seq_len, d_model)
print(pos_en(x).shape)        # torch.Size([1, 100, 64])
```
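The double Python loop above gets slow for large `max_seq_len` and `d_model`; the same table can be built with vectorized tensor operations. A minimal sketch of that variant (the class name `VectorizedPositionalEncoder` is illustrative, not from the original post):

```python
import torch
import torch.nn as nn


class VectorizedPositionalEncoder(nn.Module):
    """Builds the same sinusoidal table as PositionalEncoder, without loops."""

    def __init__(self, d_model, max_seq_len=80):
        super().__init__()
        self.d_model = d_model
        # Column vector of positions: (max_seq_len, 1)
        position = torch.arange(max_seq_len, dtype=torch.float32).unsqueeze(1)
        # 10000^(i / d_model) for the even indices i = 0, 2, ..., d_model - 2
        div_term = torch.pow(
            10000.0, torch.arange(0, d_model, 2, dtype=torch.float32) / d_model
        )  # (d_model / 2,)
        pe = torch.zeros(max_seq_len, d_model)
        # Broadcasting gives (max_seq_len, d_model / 2) for each half
        pe[:, 0::2] = torch.sin(position / div_term)
        pe[:, 1::2] = torch.cos(position / div_term)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        x = x * (self.d_model ** 0.5)
        return x + self.pe[:, :x.size(1)]
```

Up to float rounding, `VectorizedPositionalEncoder(64).pe` should match the `pe` buffer produced by the loop version above.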