# rel_transformer.py

import math

import torch
from torch import nn
from torch.nn import functional as F

from modules.commons.layers import Embedding


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)
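

# Illustrative note (added, not part of the original file): sequence_mask builds a boolean
# [batch, max_length] mask with True on valid positions, e.g.
#   sequence_mask(torch.tensor([2, 3]))
#   -> tensor([[ True,  True, False],
#              [ True,  True,  True]])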


class Encoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
                 window_size=None, block_length=None, pre_ln=False, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.block_length = block_length
        self.pre_ln = pre_ln

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size,
                                   p_dropout=p_dropout, block_length=block_length))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))
        if pre_ln:
            self.last_ln = LayerNorm(hidden_channels)

    def forward(self, x, x_mask, attn_mask=1):
        if isinstance(attn_mask, torch.Tensor):
            attn_mask = attn_mask[:, None]
        # [b, 1, t, t] pairwise mask: valid query positions x valid key positions
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) * attn_mask
        for i in range(self.n_layers):
            x = x * x_mask
            # self-attention sub-layer (pre-LN normalizes before the sub-layer, post-LN after the residual)
            x_ = x
            if self.pre_ln:
                x = self.norm_layers_1[i](x)
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = x_ + y
            if not self.pre_ln:
                x = self.norm_layers_1[i](x)
            # feed-forward sub-layer
            x_ = x
            if self.pre_ln:
                x = self.norm_layers_2[i](x)
            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = x_ + y
            if not self.pre_ln:
                x = self.norm_layers_2[i](x)
        if self.pre_ln:
            x = self.last_ln(x)
        x = x * x_mask
        return x
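

# Illustrative sketch (hypothetical helper, not part of the original file): the Encoder
# expects channel-first inputs [batch, hidden, time] plus a [batch, 1, time] mask. The
# hyperparameter values below are assumptions chosen only to exercise the module.
def _demo_encoder(batch=2, hidden=64, time=10):
    enc = Encoder(hidden_channels=hidden, filter_channels=256, n_heads=4, n_layers=2,
                  kernel_size=3, window_size=4, pre_ln=True)
    lengths = torch.tensor([time, time - 3])
    x_mask = sequence_mask(lengths, time).unsqueeze(1).float()  # [b, 1, t]
    x = torch.randn(batch, hidden, time) * x_mask
    return enc(x, x_mask)  # [b, hidden, t]; padded frames are zeroed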


class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.,
                 block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.p_dropout = p_dropout
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        if proximal_init:
            self.conv_k.weight.data.copy_(self.conv_q.weight.data)
            self.conv_k.bias.data.copy_(self.conv_q.bias.data)
        nn.init.xavier_uniform_(self.conv_v.weight)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)
        x, self.attn = self.attention(q, k, v, mask=attn_mask)
        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings)
            rel_logits = self._relative_position_to_absolute_position(rel_logits)
            scores_local = rel_logits / math.sqrt(self.k_channels)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores * block_mask + -1e4 * (1 - block_mask)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along the column dimension
        x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, -1])
        # add zeros at the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.

        Args:
            length: an integer scalar.
        Returns:
            a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
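

# Illustrative sketch (hypothetical helper, not part of the original file): a shape check
# for relative-position self-attention. The dimensions below are assumptions; the only
# real constraint is that `channels` is divisible by `n_heads`.
def _demo_relative_attention(length=5, channels=8, n_heads=2, window_size=4):
    attn = MultiHeadAttention(channels, channels, n_heads, window_size=window_size)
    x = torch.randn(1, channels, length)
    out = attn(x, x)  # self-attention: query and key/value come from the same tensor
    assert out.shape == (1, channels, length)
    assert attn.attn.shape == (1, n_heads, length, length)  # stored attention weights
    return out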


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(x * x_mask)
        if self.activation == "gelu":
            # sigmoid approximation of GELU: x * sigmoid(1.702 * x)
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        return x * x_mask


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-4):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        n_dims = len(x.shape)
        mean = torch.mean(x, 1, keepdim=True)
        variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
        x = (x - mean) * torch.rsqrt(variance + self.eps)
        shape = [1, -1] + [1] * (n_dims - 2)
        x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x
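

# Illustrative note (hypothetical helper, not part of the original file): unlike
# nn.LayerNorm, the LayerNorm above normalizes over the channel dimension (dim=1)
# of a channel-first [batch, channels, time] tensor.
def _demo_layer_norm(channels=8, time=5):
    ln = LayerNorm(channels)
    y = ln(torch.randn(2, channels, time))
    # at each (batch, time) position the channel statistics are ~0 mean / unit variance
    return y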


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class RelTransformerEncoder(nn.Module):
    def __init__(self,
                 n_vocab,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout=0.0,
                 window_size=4,
                 block_length=None,
                 in_channels=None,
                 prenet=True,
                 pre_ln=True,
                 ):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.block_length = block_length
        self.prenet = prenet

        if n_vocab > 0:
            self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
        if prenet:
            if in_channels is None:
                in_channels = hidden_channels
            self.pre = ConvReluNorm(in_channels, in_channels, in_channels,
                                    kernel_size=5, n_layers=3, p_dropout=0)
        if in_channels is not None and in_channels != hidden_channels:
            self.encoder_inp_proj = nn.Conv1d(in_channels, hidden_channels, 1)
        self.encoder = Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            window_size=window_size,
            block_length=block_length,
            pre_ln=pre_ln,
        )

    def forward(self, x, x_mask=None, other_embeds=0, attn_mask=1):
        if self.n_vocab > 0:
            x_lengths = (x > 0).long().sum(-1)
            x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
        else:
            x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
        x = x + other_embeds
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        if self.prenet:
            x = self.pre(x, x_mask)
            self.prenet_out = x.transpose(1, 2)
        if hasattr(self, 'encoder_inp_proj'):
            x = self.encoder_inp_proj(x) * x_mask
        x = self.encoder(x, x_mask, attn_mask)
        return x.transpose(1, 2)
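

if __name__ == "__main__":
    # Hedged smoke test (added, not part of the original file): the hyperparameters and
    # vocabulary size below are illustrative assumptions, not values from any recipe.
    encoder = RelTransformerEncoder(
        n_vocab=100, out_channels=192, hidden_channels=192, filter_channels=768,
        n_heads=2, n_layers=4, kernel_size=5, p_dropout=0.1)
    tokens = torch.randint(1, 100, (2, 17))  # [batch, time] token IDs; 0 is the padding index
    tokens[1, 12:] = 0                       # pretend the second sequence is shorter
    out = encoder(tokens)
    print(out.shape)  # expected: torch.Size([2, 17, 192])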