# transformer.py

import math
import torch
from torch import nn
from torch.nn import Parameter, Linear
from modules.commons.layers import LayerNorm, Embedding
from utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F

DEFAULT_MAX_SOURCE_POSITIONS = 3000
DEFAULT_MAX_TARGET_POSITIONS = 3000


class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored.
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb

    def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        positions = make_positions(input, self.padding_idx) if positions is None else positions
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()

    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
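

# Illustrative usage sketch for SinusoidalPositionalEmbedding. The helper name and the
# hyperparameters below are arbitrary examples; it assumes make_positions follows the
# fairseq convention of assigning padding_idx to padded tokens, so pad positions pick up
# the zeroed pad row of the embedding table.
def _example_sinusoidal_positions():
    pos_emb = SinusoidalPositionalEmbedding(embedding_dim=256, padding_idx=0, init_size=1024)
    tokens = torch.tensor([[5, 7, 9, 0, 0],
                           [3, 4, 0, 0, 0]])  # [B, T] token ids, 0 marks padding
    positions = pos_emb(tokens)  # [B, T, 256]; padded slots map to the all-zero pad row
    return positions.shape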


class TransformerFFNLayer(nn.Module):
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.act = act
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
        elif padding == 'LEFT':
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size)
            )
        self.ffn_2 = Linear(filter_size, hidden_size)

    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                prev_input = saved_state['prev_input']
                x = torch.cat((prev_input, x), dim=0)
            x = x[-self.kernel_size:]
            saved_state['prev_input'] = x
            self._set_input_buffer(incremental_state, saved_state)

        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = x * self.kernel_size ** -0.5

        if incremental_state is not None:
            x = x[-1:]
        if self.act == 'gelu':
            x = F.gelu(x)
        if self.act == 'relu':
            x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x

    def _get_input_buffer(self, incremental_state):
        return get_incremental_state(
            self,
            incremental_state,
            'f',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(
            self,
            incremental_state,
            'f',
            buffer,
        )

    def clear_buffer(self, incremental_state):
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                del saved_state['prev_input']
            self._set_input_buffer(incremental_state, saved_state)


class MultiheadAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, \
            'Self-attention requires query, key and value to be of the same size'

        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn

        self.reset_parameters()
        self.enable_torch_version = False
        self.last_attn_probs = None

    def reset_parameters(self):
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
            self,
            query, key, value,
            key_padding_mask=None,
            incremental_state=None,
            need_weights=True,
            static_kv=False,
            attn_mask=None,
            before_softmax=False,
            need_head_weights=False,
            enc_dec_attn_constraint_mask=None,
            reset_attn_weight=None
    ):
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q = q * self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:
                prev_key_padding_mask = saved_state['prev_key_padding_mask']
                if static_kv:
                    key_padding_mask = prev_key_padding_mask
                else:
                    key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)
            saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state['prev_key_padding_mask'] = key_padding_mask
            self._set_input_buffer(incremental_state, saved_state)

        src_len = k.size(1)

        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

        if self.add_zero_attn:
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            if len(attn_mask.shape) == 2:
                attn_mask = attn_mask.unsqueeze(0)
            elif len(attn_mask.shape) == 3:
                attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
                    bsz * self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights + attn_mask

        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                -1e8,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                -1e8,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)

        if before_softmax:
            return attn_weights, v

        attn_weights_float = softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)

        if reset_attn_weight is not None:
            if reset_attn_weight:
                self.last_attn_probs = attn_probs.detach()
            else:
                assert self.last_attn_probs is not None
                attn_probs = self.last_attn_probs

        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None

        return attn, (attn_weights, attn_logits)
    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_q(self, query):
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)

    def in_proj_k(self, key):
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)

    def in_proj_v(self, value):
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)

    def _in_proj(self, input, start=0, end=None):
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def _get_input_buffer(self, incremental_state):
        return get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )

    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        return attn_weights

    def clear_buffer(self, incremental_state=None):
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                del saved_state['prev_key']
            if 'prev_value' in saved_state:
                del saved_state['prev_value']
            self._set_input_buffer(incremental_state, saved_state)
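

# Illustrative usage sketch for MultiheadAttention in self-attention mode. The helper name
# and sizes are arbitrary examples; inputs are [T, B, C], the key padding mask is a boolean
# [B, T] tensor with True at padded keys, and the second return value is the
# (averaged attention weights, per-head logits) pair produced above.
def _example_self_attention():
    attn = MultiheadAttention(embed_dim=256, num_heads=2, self_attention=True,
                              dropout=0.0, bias=False)
    x = torch.randn(50, 4, 256)                   # [T, B, C]
    key_padding_mask = torch.zeros(4, 50).bool()  # [B, T], no padding in this example
    out, (weights, logits) = attn(query=x, key=x, value=x, key_padding_mask=key_padding_mask)
    # out: [T, B, C], weights: [B, T, T] (head-averaged), logits: [B, num_heads, T, T]
    return out.shape, weights.shape, logits.shape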


class EncSALayer(nn.Module):
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
                 relu_dropout=0.1, kernel_size=9, padding='SAME', act='gelu',
                 ffn_hidden_size=1024):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.num_heads = num_heads
        if num_heads > 0:
            self.layer_norm1 = LayerNorm(c)
            self.self_attn = MultiheadAttention(
                self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False)
        self.layer_norm2 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(
            c, ffn_hidden_size, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        if self.num_heads > 0:
            residual = x
            x = self.layer_norm1(x)
            x, _ = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=encoder_padding_mask
            )
            x = F.dropout(x, self.dropout, training=self.training)
            x = residual + x
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]

        residual = x
        x = self.layer_norm2(x)
        x = self.ffn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return x


class DecSALayer(nn.Module):
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1,
                 kernel_size=9, ffn_hidden_size=1024, act='gelu', post_ln=False):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
        )
        self.layer_norm2 = LayerNorm(c)
        self.encoder_attn = MultiheadAttention(
            c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm3 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(
            c, ffn_hidden_size, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
        self.post_ln = post_ln

    def forward(
            self,
            x,
            encoder_out=None,
            encoder_padding_mask=None,
            incremental_state=None,
            self_attn_mask=None,
            self_attn_padding_mask=None,
            attn_out=None,
            reset_attn_weight=None,
            **kwargs,
    ):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
            self.layer_norm3.training = layer_norm_training

        residual = x
        if not self.post_ln:
            x = self.layer_norm1(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        if self.post_ln:
            x = self.layer_norm1(x)

        attn_logits = None
        if encoder_out is not None or attn_out is not None:
            residual = x
            if not self.post_ln:
                x = self.layer_norm2(x)
        if encoder_out is not None:
            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state,
                                                                   'enc_dec_attn_constraint_mask'),
                reset_attn_weight=reset_attn_weight
            )
            attn_logits = attn[1]
        elif attn_out is not None:
            x = self.encoder_attn.in_proj_v(attn_out)
        if encoder_out is not None or attn_out is not None:
            x = F.dropout(x, self.dropout, training=self.training)
            x = residual + x
            if self.post_ln:
                x = self.layer_norm2(x)

        residual = x
        if not self.post_ln:
            x = self.layer_norm3(x)
        x = self.ffn(x, incremental_state=incremental_state)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        if self.post_ln:
            x = self.layer_norm3(x)
        return x, attn_logits

    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        self.encoder_attn.clear_buffer(incremental_state)
        self.ffn.clear_buffer(incremental_state)

    def set_buffer(self, name, tensor, incremental_state):
        return set_incremental_state(self, incremental_state, name, tensor)


class TransformerEncoderLayer(nn.Module):
    def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2, ffn_hidden_size=1024):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        self.op = EncSALayer(
            hidden_size, num_heads, dropout=dropout,
            attention_dropout=0.0, relu_dropout=dropout,
            kernel_size=kernel_size, ffn_hidden_size=ffn_hidden_size)

    def forward(self, x, **kwargs):
        return self.op(x, **kwargs)


class TransformerDecoderLayer(nn.Module):
    def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2, ffn_hidden_size=1024, post_ln=False):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        self.op = DecSALayer(
            hidden_size, num_heads, dropout=dropout,
            attention_dropout=0.0, relu_dropout=dropout,
            kernel_size=kernel_size, ffn_hidden_size=ffn_hidden_size,
            post_ln=post_ln)

    def forward(self, x, **kwargs):
        return self.op(x, **kwargs)

    def clear_buffer(self, *args):
        return self.op.clear_buffer(*args)

    def set_buffer(self, *args):
        return self.op.set_buffer(*args)


class FFTBlocks(nn.Module):
    def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=0.0,
                 num_heads=2, use_pos_embed=True, use_last_norm=True,
                 use_pos_embed_alpha=True, ffn_hidden_size=1024):
        super().__init__()
        self.num_layers = num_layers
        embed_dim = self.hidden_size = hidden_size
        self.dropout = dropout
        self.use_pos_embed = use_pos_embed
        self.use_last_norm = use_last_norm
        if use_pos_embed:
            self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
            self.padding_idx = 0
            self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
            self.embed_positions = SinusoidalPositionalEmbedding(
                embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
            )

        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerEncoderLayer(self.hidden_size, self.dropout,
                                    kernel_size=ffn_kernel_size, num_heads=num_heads,
                                    ffn_hidden_size=ffn_hidden_size)
            for _ in range(self.num_layers)
        ])
        if self.use_last_norm:
            self.layer_norm = nn.LayerNorm(embed_dim)
        else:
            self.layer_norm = None

    def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
        """
        :param x: [B, T, C]
        :param padding_mask: [B, T]
        :return: [B, T, C] or [L, B, T, C]
        """
        padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
        nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None]  # [T, B, 1]
        if self.use_pos_embed:
            positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
            x = x + positions
            x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1) * nonpadding_mask_TB
        hiddens = []
        for layer in self.layers:
            x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
            hiddens.append(x)
        if self.use_last_norm:
            x = self.layer_norm(x) * nonpadding_mask_TB
        if return_hiddens:
            x = torch.stack(hiddens, 0)  # [L, T, B, C]
            x = x.transpose(1, 2)  # [L, B, T, C]
        else:
            x = x.transpose(0, 1)  # [B, T, C]
        return x
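

# Illustrative usage sketch for FFTBlocks. The helper name and sizes are arbitrary examples;
# it runs the block stack over a padded [B, T, C] batch. When no padding_mask is given,
# all-zero frames are treated as padding, so an explicit boolean mask is passed here.
def _example_fft_blocks():
    blocks = FFTBlocks(hidden_size=256, num_layers=4, ffn_kernel_size=9, num_heads=2)
    x = torch.randn(4, 50, 256)               # [B, T, C]
    padding_mask = torch.zeros(4, 50).bool()  # [B, T], True marks padded frames
    y = blocks(x, padding_mask=padding_mask)  # [B, T, C]
    return y.shape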


class FastSpeechEncoder(FFTBlocks):
    def __init__(self, dict_size, hidden_size=256, num_layers=4, kernel_size=9,
                 dropout=0.0, num_heads=2, ffn_hidden_size=1024):
        super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
                         use_pos_embed=False, dropout=dropout, ffn_hidden_size=ffn_hidden_size)
        self.embed_tokens = Embedding(dict_size, hidden_size, 0)
        self.embed_scale = math.sqrt(hidden_size)
        self.padding_idx = 0
        self.embed_positions = SinusoidalPositionalEmbedding(
            hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
        )

    def forward(self, txt_tokens, attn_mask=None, other_embeds=0):
        """
        :param txt_tokens: [B, T]
        :return: encoder output of shape [B, T, C]
        """
        encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
        x = self.forward_embedding(txt_tokens) + other_embeds  # [B, T, H]
        if self.num_layers > 0:
            x = super(FastSpeechEncoder, self).forward(x, encoder_padding_mask, attn_mask=attn_mask)
        return x

    def forward_embedding(self, txt_tokens):
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(txt_tokens)
        if self.use_pos_embed:
            positions = self.embed_positions(txt_tokens)
            x = x + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        return x
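

# Illustrative smoke-test sketch: encodes a padded batch of token ids with FastSpeechEncoder.
# The vocabulary size and shapes are arbitrary examples; 0 is assumed to be the padding index,
# as in the modules above.
if __name__ == "__main__":
    encoder = FastSpeechEncoder(dict_size=100, hidden_size=256, num_layers=4, num_heads=2)
    txt_tokens = torch.randint(1, 100, (4, 50))  # [B, T] token ids, 0 reserved for padding
    txt_tokens[:, -5:] = 0                       # simulate padded tails
    out = encoder(txt_tokens)                    # [B, T, C]
    print(out.shape)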