import math
from typing import Optional, List
import torch
from torch import nn
class PrepareForMultiHeadAttention(nn.Module):
    """
    ## Prepare for multi-head attention

    This module does a linear transformation and splits the result into the given
    number of heads. It is used to prepare the `query`, `key` and `value` vectors.
    """

    def __init__(self, d_model: int, heads: int, d_k: int, bias: bool):
        super().__init__()
        # Linear layer that maps `d_model` features to `heads * d_k` features
        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)
        self.heads = heads
        self.d_k = d_k

    def forward(self, x: torch.Tensor):
        # Keep all dimensions except the last (the feature dimension)
        head_shape = x.shape[:-1]
        x = self.linear(x)
        # Split last dimension into heads
        x = x.view(*head_shape, self.heads, self.d_k)
        # Output has shape `[seq_len, batch_size, heads, d_k]` or `[batch_size, heads, d_k]`
        return x
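
# A minimal shape sketch of what `PrepareForMultiHeadAttention` does;
# the sizes below are illustrative assumptions, not fixed by the module:
#
#     prep = PrepareForMultiHeadAttention(d_model=512, heads=8, d_k=64, bias=True)
#     x = torch.rand(10, 4, 512)              # `[seq_len, batch_size, d_model]`
#     assert prep(x).shape == (10, 4, 8, 64)  # `[seq_len, batch_size, heads, d_k]`
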
class MultiHeadAttention(nn.Module):
r"“”
## Multi-Head Attention Module
This computes scaled multi-headed attention for given `query`, `key` and `value` vectors. """ def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, bias: bool = True): """ * `heads` is the number of heads. * `d_model` is the number of features in the `query`, `key` and `value` vectors. """ super().__init__() # Number of features per head self.d_k = d_model // heads self.heads = heads # These transform the `query`, `key` and `value` vectors for multi-headed attention. self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias) self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias) self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True) # Softmax for attention along the time dimension of `key` self.softmax = nn.Softmax(dim=1) self.output = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout_prob) self.scale = 1 / math.sqrt(self.d_k) # We store attentions so that it can be used for logging, or other computations if needed self.attn = None def get_scores(self, query: torch.Tensor, key: torch.Tensor): """ ### Calculate scores between queries and keys This method can be overridden for other variations like relative attention. """ return torch.einsum('ibhd,jbhd->ijbh', query, key) def prepare_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]): """ `mask` has shape `[seq_len_q, seq_len_k, batch_size]`, where first dimension is the query dimension. If the query dimension is equal to 1 it will be broadcasted. """ assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0] assert mask.shape[1] == key_shape[0] assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1] # Same mask applied to all heads. mask = mask.unsqueeze(-1) # resulting mask has shape `[seq_len_q, seq_len_k, batch_size, heads]` return mask def forward(self, *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None): """ `query`, `key` and `value` are the tensors that store collection of *query*, *key* and *value* vectors. They have shape `[seq_len, batch_size, d_model]`. `mask` has shape `[seq_len, seq_len, batch_size]` and `mask[i, j, b]` indicates whether for batch `b`, query at position `i` has access to key-value at position `j`. """ # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]` seq_len, batch_size, _ = query.shape if mask is not None: mask = self.prepare_mask(mask, query.shape, key.shape) # Prepare `query`, `key` and `value` for attention computation. # These will then have shape `[seq_len, batch_size, heads, d_k]`. query = self.query(query) key = self.key(key) value = self.value(value) # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`. scores = self.get_scores(query, key) scores *= self.scale if mask is not None: scores = scores.masked_fill(mask == 0, float('-inf')) attn = self.softmax(scores) # Apply dropout attn = self.dropout(attn) # Multiply by values x = torch.einsum("ijbh,jbhd->ibhd", attn, value) # Save attentions for any other calculations self.attn = attn.detach() # Concatenate multiple heads x = x.reshape(seq_len, batch_size, -1) # Output layer return self.output(x)
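

if __name__ == '__main__':
    # A small smoke-test sketch; the sizes and the causal mask below are illustrative
    # assumptions, not requirements of the module.
    seq_len, batch_size, d_model, heads = 10, 4, 512, 8
    mha = MultiHeadAttention(heads=heads, d_model=d_model)
    q = torch.rand(seq_len, batch_size, d_model)
    k = torch.rand(seq_len, batch_size, d_model)
    v = torch.rand(seq_len, batch_size, d_model)
    # Causal mask: query at position `i` may only attend to keys at positions `j <= i`,
    # with a singleton batch dimension that gets broadcast in `prepare_mask`.
    mask = torch.tril(torch.ones(seq_len, seq_len)).unsqueeze(-1)  # `[seq_len, seq_len, 1]`
    out = mha(query=q, key=k, value=v, mask=mask)
    assert out.shape == (seq_len, batch_size, d_model)
    assert mha.attn.shape == (seq_len, seq_len, batch_size, heads)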