import math

import torch

class PositionalEncoding(torch.nn.Module):
    """Positional encoding.

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
        reverse (bool): Whether to reverse the input position.

    """

    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """Construct a PositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.reverse = reverse
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        # Pre-compute the encoding table up to max_len positions; a dummy
        # tensor of shape (1, max_len) is enough to size the table.
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
    def extend_pe(self, x):
        """Reset the positional encodings if the cached table is too short."""
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                # Cached table is long enough; only sync dtype/device.
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            # Positions counted backwards: T-1, T-2, ..., 0.
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        # Sinusoidal encoding from "Attention Is All You Need":
        #   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        #   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
        # (d_model is assumed to be even.)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)
    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).

        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)
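

# A minimal usage sketch (not part of the original module): how this absolute
# positional encoding is typically applied to a batch of embeddings. The
# hyperparameters (d_model=256, dropout_rate=0.1) and shapes are illustrative
# assumptions, not values from the source.
def _demo_positional_encoding():
    pos_enc = PositionalEncoding(d_model=256, dropout_rate=0.1)
    x = torch.randn(8, 100, 256)  # (batch, time, d_model)
    y = pos_enc(x)  # x * sqrt(d_model) + PE, then dropout
    assert y.shape == (8, 100, 256)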


class ScaledPositionalEncoding(PositionalEncoding):
    """Scaled positional encoding module.

    See Sec. 3.2 of https://arxiv.org/abs/1809.08895

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.

    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class."""
        super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
        # Learnable scale on the positional term.
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))
    def reset_parameters(self):
        """Reset the learnable scale alpha to its initial value of 1.0."""
        self.alpha.data = torch.tensor(1.0)
    def forward(self, x):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).

        """
        self.extend_pe(x)
        # Unlike the base class, the input is not multiplied by sqrt(d_model);
        # the learned alpha scales the positional term instead.
        x = x + self.alpha * self.pe[:, : x.size(1)]
        return self.dropout(x)
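

# A minimal usage sketch (illustrative, with assumed hyperparameters):
# ScaledPositionalEncoding weights the positional term by a learnable alpha,
# so alpha appears among the module's trainable parameters.
def _demo_scaled_positional_encoding():
    pos_enc = ScaledPositionalEncoding(d_model=256, dropout_rate=0.1)
    x = torch.randn(8, 100, 256)  # (batch, time, d_model)
    y = pos_enc(x)
    assert y.shape == (8, 100, 256)
    # alpha is registered as a parameter and is updated during training.
    assert any(p.requires_grad for p in pos_enc.parameters())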


class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.

    See Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.

    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Initialize class."""
        super().__init__(d_model, dropout_rate, max_len, reverse=True)
    def forward(self, x):
        """Compute positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Positional embedding tensor (1, time, `*`).

        """
        self.extend_pe(x)
        x = x * self.xscale
        # The positional embedding is returned separately so that it can be
        # consumed by a relative self-attention layer.
        pos_emb = self.pe[:, : x.size(1)]
        return self.dropout(x), self.dropout(pos_emb)
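

# A minimal usage sketch (illustrative, with assumed shapes): unlike the other
# variants, RelPositionalEncoding returns a pair — the scaled input and the
# positional embedding — the latter intended for a downstream relative
# self-attention layer (Transformer-XL style, per the cited paper).
def _demo_rel_positional_encoding():
    pos_enc = RelPositionalEncoding(d_model=256, dropout_rate=0.1)
    x = torch.randn(8, 100, 256)  # (batch, time, d_model)
    y, pos_emb = pos_enc(x)
    assert y.shape == (8, 100, 256)
    assert pos_emb.shape == (1, 100, 256)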