import math
import torch
from typing import Optional, Tuple
from torch import nn
from utils.nn.seq_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F
# from flash_attn import flash_attn_qkvpacked_func, flash_attn_func

DEFAULT_MAX_SOURCE_POSITIONS = 20000
DEFAULT_MAX_TARGET_POSITIONS = 20000


class RotaryEmbeddings(nn.Module):
    cos: torch.Tensor
    sin: torch.Tensor
    theta: torch.Tensor

    def __init__(
        self,
        width: int,
        *,
        seq_len: int = 4000,
        base: int = 10000,
        device: Optional[torch.device] = None,
    ):
        """Rotary embeddings (Su et al., 2021) layer. The rotary embedding
        will be precomputed for up to `seq_len` positions. The embedding
        will be recomputed when a longer sequence is found in the input.

        :param width:
            Rotary embedding dimensionality; must be even.
        :param seq_len:
            Number of positions to initially precompute.
        :param base:
            The base used for Θ_i; determines the cycle length of the
            embeddings.
        :param device: Device on which the module is to be initialized.
        """
        super().__init__()
        if width % 2:
            raise ValueError(f"Width of rotary embeddings must be even, was: {width}")
        # Ignore allocations on the meta device as we don't persist our buffer,
        # i.e., we don't expect the backing tensor to be replaced with pretrained weights.
        if device is not None and device.type == "meta":
            device = None
        # Θ_i = 10000^(-2(i-1)/d)
        theta = torch.pow(
            base, -torch.arange(0, width, 2, dtype=torch.float, device=device) / width
        )
        self.register_buffer("theta", theta, persistent=False)
        self._create_rotary_embed(width=width, length=seq_len)
    def _create_rotary_embed(self, *, width: int, length: int):
        # mΘ
        position = torch.arange(length, device=self.theta.device).unsqueeze(1)
        m_theta = position * self.theta.unsqueeze(0)

        # We apply both sin and cos twice (see Eq 15, 34), but the ordering
        # is changed for compatibility with most common implementations.
        m_theta = torch.cat([m_theta, m_theta], dim=-1)

        re_cos = m_theta.cos().view([length, width]).half()
        re_sin = m_theta.sin().view([length, width]).half()

        self.register_buffer("cos", re_cos, persistent=False)
        self.register_buffer("sin", re_sin, persistent=False)
    def _rotate(self, input: torch.Tensor):
        """Rotate the input tensor by half of its innermost width.

        input (Tensor): array to rotate.
        RETURNS (Tensor): rotated array.

        Shapes:
            input - (..., width)
            output - (..., width)
        """
        half_idx = input.shape[-1] // 2
        input_1 = -input[..., half_idx:]
        input_2 = input[..., :half_idx]
        return torch.cat([input_1, input_2], dim=-1)
    def forward(self, input: torch.Tensor, *, positions: Optional[torch.Tensor] = None):
        """
        Apply rotary embeddings to an array.

        :param input: Array to apply the rotary embeddings to.
        :param positions: positions of the inputs. If no positions are
            provided, they are assumed to be [0, seq_len).
        :return: Array with the rotary embeddings applied.

        Shapes:
            input - (batch_size, num_heads, seq_len, width_per_head)
            positions - (batch_size, seq_len)
            output - (batch_size, num_heads, seq_len, width_per_head)
        """
        batch_size, _, seq_len, width = input.shape

        if positions is None:
            # Fastpath: positions from [0..seq_len), avoid indexing.
            if self.cos.size(-2) < seq_len:
                self._create_rotary_embed(width=width, length=seq_len)
            rot_cos = self.cos[:seq_len, :].view(1, 1, seq_len, width)
            rot_sin = self.sin[:seq_len, :].view(1, 1, seq_len, width)
        else:
            max_len = int(positions.max()) + 1
            if self.cos.size(-2) < max_len:
                self._create_rotary_embed(width=width, length=max_len)

            # Flatten positions to index cos/sin arrays, then unflatten.
            #
            # Example shapes:
            #
            #   positions_flat - (batch_size * seq_len)
            #   self.cos       - (max_len, width)
            #   rot_cos        - (batch_size, seq_len, width)
            positions_flat = positions.view(-1)
            rot_cos = self.cos[positions_flat].view(batch_size, 1, seq_len, width)
            rot_sin = self.sin[positions_flat].view(batch_size, 1, seq_len, width)

        # Eq 34 with ordering changed for compatibility.
        return rot_cos * input + rot_sin * self._rotate(input)
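

# Usage sketch (editor's illustration, not part of the original module): applies the
# rotary layer to a dummy query tensor with the shapes documented in forward().
# The helper is defined but never called, so importing this file stays side-effect free.
def _rotary_embeddings_usage_example():
    rotary = RotaryEmbeddings(width=64)   # width = head_dim, must be even
    q = torch.randn(2, 8, 128, 64)        # (batch_size, num_heads, seq_len, width_per_head)
    return rotary(q)                      # same shape; positions default to [0, seq_len)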


class LayerNorm(nn.Module):
    """LayerNorm with an optional bias. PyTorch's nn.LayerNorm doesn't support simply setting bias=False."""

    def __init__(self, ndim, bias=False):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, input):
        return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
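

# Sketch (editor's illustration, not part of the original module): unlike torch.nn.LayerNorm,
# this variant defaults to bias=False, leaving only the learnable scale. Defined but never called.
def _layer_norm_usage_example():
    ln = LayerNorm(256)          # weight of shape (256,), no bias parameter
    x = torch.randn(4, 10, 256)
    return ln(x)                 # normalized over the last dimension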


class CausalSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.):
        super().__init__()
        # Typically bias=True is used in Linears and LayerNorms, like GPT-2. We set bias=False:
        # a bit better and faster (following https://github.com/karpathy/nanoGPT).
        assert embed_dim % num_heads == 0
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(embed_dim, 3 * embed_dim, bias=False)
        # output projection
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False)
        # rotary embeddings
        self.rotary_embeds = RotaryEmbeddings(width=embed_dim // num_heads)
        # flash attention makes the GPU go brrrrr, but support is only in PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
    def forward(
        self,
        query, key, value,
        spk_pos_ids_flat=None,
        incremental_state=None,
        need_weights=True,
        static_kv=False,
        attn_mask=None,
        need_head_weights=False,
        enc_dec_attn_constraint_mask=None,
    ):
        """Input shape: Time x Batch x Channel

        Args:
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True). The flash path does not
                expose attention weights and returns None for them.
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
        else:
            saved_state = None

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(query).split(self.embed_dim, dim=2)
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        # Apply rotary embedding to q and store incremental_state
        q = self.rotary_embeds(q[None, :], positions=spk_pos_ids_flat)[0]
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if 'prev_key' in saved_state:
                prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    k = torch.cat((prev_key, k), dim=1)
            if 'prev_value' in saved_state:
                prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    v = torch.cat((prev_value, v), dim=1)
            saved_state['prev_key'], saved_state['prev_value'] = k.view(bsz, self.num_heads, -1, self.head_dim), v.view(
                bsz, self.num_heads, -1, self.head_dim)
            self._set_input_buffer(incremental_state, saved_state)

        # Rotary positions for keys: during incremental decoding the cached keys
        # cover positions [0, k_len); otherwise reuse the provided positions.
        if incremental_state is not None:
            key_pos = torch.arange(k.shape[-2], device=q.device).unsqueeze(0)
        else:
            key_pos = spk_pos_ids_flat
        k = self.rotary_embeds(k[None, :], positions=key_pos)[0]

        src_len = k.size(1)
        # Start Attention
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            attn = torch.nn.functional.scaled_dot_product_attention(
                q, k, v, attn_mask=attn_mask, dropout_p=0,
                is_causal=False)
            assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
            # Flash Attn 2
            # from flash_attn import flash_attn_func
            # q, k, v = q.transpose(0, 1)[None, :], k.transpose(0, 1)[None, :], v.transpose(0, 1)[None, :]
            # attn = flash_attn_func(q, k, v, dropout_p=0.0, causal=False)[0].contiguous().view(tgt_len, bsz, embed_dim)
            attn = self.out_proj(attn)
            attn_logits = None
            # scaled_dot_product_attention does not expose per-head weights.
            attn_weights = None
        else:
            # self.scaling is otherwise unused; apply the same 1/sqrt(head_dim)
            # scaling that scaled_dot_product_attention uses in the flash path.
            attn_weights = torch.bmm(q, k.transpose(1, 2)) * self.scaling
            assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

            if attn_mask is not None:
                if len(attn_mask.shape) == 2:
                    attn_mask = attn_mask.unsqueeze(0)
                elif len(attn_mask.shape) == 3:
                    attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
                        bsz * self.num_heads, tgt_len, src_len)
                attn_weights = attn_weights + attn_mask

            attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)

            attn_weights_float = softmax(attn_weights, dim=-1)
            attn_weights = attn_weights_float.type_as(attn_weights)
            attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)

            attn = torch.bmm(attn_probs, v)
            assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
            attn = self.out_proj(attn)

            if need_weights:
                attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
                if not need_head_weights:
                    # average attention weights over heads
                    attn_weights = attn_weights.mean(dim=0)
            else:
                attn_weights = None

        return attn, (attn_weights, attn_logits)
    def _get_input_buffer(self, incremental_state):
        return get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )

    def clear_buffer(self, incremental_state=None):
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                del saved_state['prev_key']
            if 'prev_value' in saved_state:
                del saved_state['prev_value']
            self._set_input_buffer(incremental_state, saved_state)
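

# Usage sketch (editor's illustration, not part of the original module): inputs follow the
# Time x Batch x Channel convention documented in forward(). Causality comes from an
# explicit additive mask, since scaled_dot_product_attention is called with is_causal=False.
# Defined but never called.
def _causal_self_attention_usage_example():
    tgt_len, bsz, embed_dim = 16, 2, 256
    attn_layer = CausalSelfAttention(embed_dim, num_heads=4)
    x = torch.randn(tgt_len, bsz, embed_dim)
    # Additive float mask: 0 on/below the diagonal, -inf above it.
    causal_mask = torch.triu(torch.full((tgt_len, tgt_len), float('-inf')), diagonal=1)
    out, (weights, logits) = attn_layer(x, x, x, attn_mask=causal_mask, need_weights=False)
    return out  # (tgt_len, bsz, embed_dim)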


class TransformerFFNLayer(nn.Module):
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.act = act
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2, bias=False)
        elif padding == 'LEFT':
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size, bias=False)
            )
        self.ffn_2 = nn.Linear(filter_size, hidden_size, bias=False)
    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            T_inp = x.shape[0]
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                prev_input = saved_state['prev_input']
                x = torch.cat((prev_input, x), dim=0)
            x = x[-self.kernel_size:]
            saved_state['prev_input'] = x
            self._set_input_buffer(incremental_state, saved_state)

        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = x * self.kernel_size ** -0.5

        if incremental_state is not None:
            x = x[-T_inp:]

        # NOTE: `act` is currently ignored; SiLU is always applied.
        # if self.act == 'gelu':
        #     x = F.gelu(x)
        # if self.act == 'relu':
        #     x = F.relu(x)
        x = F.silu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x
    def _get_input_buffer(self, incremental_state):
        return get_incremental_state(
            self,
            incremental_state,
            'f',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(
            self,
            incremental_state,
            'f',
            buffer,
        )

    def clear_buffer(self, incremental_state):
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_input' in saved_state:
                del saved_state['prev_input']
            self._set_input_buffer(incremental_state, saved_state)
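

# Usage sketch (editor's illustration, not part of the original module): the FFN also
# uses the T x B x C layout; padding='LEFT' keeps the 1D convolution causal. Defined
# but never called.
def _transformer_ffn_usage_example():
    ffn = TransformerFFNLayer(hidden_size=256, filter_size=1024, padding='LEFT', kernel_size=9)
    x = torch.randn(16, 2, 256)      # (T, B, C)
    return ffn(x)                    # (T, B, C), expanded to filter_size then projected back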


class GPTBlock(nn.Module):
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1,
                 kernel_size=9, ffn_hidden_size=1024, act='gelu', post_ln=False, norm_cls=LayerNorm):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = norm_cls(c)
        self.self_attn = CausalSelfAttention(
            c, num_heads, dropout=attention_dropout
        )
        self.layer_norm2 = norm_cls(c)
        self.ffn = TransformerFFNLayer(
            c, ffn_hidden_size, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
        self.post_ln = post_ln
    def forward(
        self,
        x,
        encoder_out=None,
        encoder_padding_mask=None,
        incremental_state=None,
        self_attn_mask=None,
        attn_out=None,
        spk_pos_ids_flat=None,
        **kwargs,
    ):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training

        # Self-attention sub-layer (pre-LN by default, post-LN if post_ln=True).
        residual = x
        if not self.post_ln:
            x = self.layer_norm1(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask,
            spk_pos_ids_flat=spk_pos_ids_flat,
            need_weights=False
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        if self.post_ln:
            x = self.layer_norm1(x)
        attn_logits = None

        # Feed-forward sub-layer.
        residual = x
        if not self.post_ln:
            x = self.layer_norm2(x)
        x = self.ffn(x, incremental_state=incremental_state)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        if self.post_ln:
            x = self.layer_norm2(x)
        return x, attn_logits
    def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
        # This block has no encoder attention; clear the self-attention and FFN caches.
        self.self_attn.clear_buffer(incremental_state)
        self.ffn.clear_buffer(incremental_state)

    def set_buffer(self, name, tensor, incremental_state):
        return set_incremental_state(self, incremental_state, name, tensor)
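

# Usage sketch (editor's illustration, not part of the original module): one pre-LN block
# applied to a T x B x C sequence with a causal mask. Defined but never called.
def _gpt_block_usage_example():
    block = GPTBlock(c=256, num_heads=4, dropout=0.1, ffn_hidden_size=1024)
    x = torch.randn(16, 2, 256)      # (T, B, C)
    causal_mask = torch.triu(torch.full((16, 16), float('-inf')), diagonal=1)
    out, _ = block(x, self_attn_mask=causal_mask)
    return out                       # (T, B, C)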


class GPTLayer(nn.Module):
    def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=8, ffn_hidden_size=1024, post_ln=False,
                 lm_num_layers=10, norm_cls=LayerNorm):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.num_heads = num_heads
        self.op = GPTBlock(
            hidden_size, num_heads, dropout=dropout,
            attention_dropout=0.0, relu_dropout=dropout,
            kernel_size=kernel_size, ffn_hidden_size=ffn_hidden_size,
            post_ln=post_ln, norm_cls=norm_cls)
        # init all weights
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        for pn, p in self.named_parameters():
            if pn.endswith('ffn_2.weight') or pn.endswith('out_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * lm_num_layers))

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    @torch.autocast(device_type='cuda')
    def forward(self, x, **kwargs):
        return self.op(x, **kwargs)

    def clear_buffer(self, *args):
        return self.op.clear_buffer(*args)

    def set_buffer(self, *args):
        return self.op.set_buffer(*args)
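

# Usage sketch (editor's illustration, not part of the original module): a full layer on a
# T x B x C sequence. The forward pass is wrapped in CUDA autocast by the decorator above,
# so reduced-precision matmuls are used automatically on GPU. Defined but never called.
def _gpt_layer_usage_example():
    layer = GPTLayer(hidden_size=256, dropout=0.1, num_heads=4, ffn_hidden_size=1024)
    x = torch.randn(16, 2, 256)      # (T, B, C)
    causal_mask = torch.triu(torch.full((16, 16), float('-inf')), diagonal=1)
    out, _ = layer(x, self_attn_mask=causal_mask)
    return out                       # (T, B, C)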