# test_optimized_layers.py

from typing import Optional, Tuple

import pytest
import torch
from transformers.cache_utils import DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.models.falcon.modeling_falcon import FalconDecoderLayer, FalconModel, build_alibi_tensor
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaModel

from petals.server.block_utils import get_model_block
from petals.utils.auto_config import AutoDistributedConfig
from petals.utils.convert_block import QuantType, convert_block
from test_utils import MODEL_NAME

KVCache = Tuple[torch.Tensor, torch.Tensor]


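# Both wrappers below expose a Bloom-style (key, value) cache interface on top of the stock
# Hugging Face decoder layers, so the test can compare them against the optimized Petals block.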
class UnoptimizedWrappedFalconBlock(FalconDecoderLayer):
    def forward(
        self,
        hidden_states: torch.Tensor,
        *args,
        attention_mask: Optional[torch.Tensor] = None,
        alibi: Optional[torch.Tensor] = None,
        layer_past: Optional[KVCache] = None,
        use_cache: bool = False,
        **kwargs,
    ):
        batch_size, seq_length = hidden_states.shape[:2]

        if layer_past is not None:
            layer_past = self._reorder_cache_from_bloom_to_falcon(layer_past)
        past_length = 0 if layer_past is None else layer_past[0].shape[1]
        seq_length_with_past = seq_length + past_length

        attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
        if alibi is None and self.config.alibi:
            alibi = build_alibi_tensor(attention_mask, num_heads=self.num_heads, dtype=hidden_states.dtype)
        attention_mask = FalconModel._prepare_attn_mask(attention_mask, (batch_size, seq_length), past_length)

        outputs = super().forward(
            hidden_states,
            *args,
            attention_mask=attention_mask,
            alibi=alibi,
            layer_past=layer_past,
            use_cache=use_cache,
            **kwargs,
        )

        if use_cache:
            present_key_value = outputs[-1]
            present_key_value = self._reorder_cache_from_falcon_to_bloom(present_key_value)
            outputs = outputs[:-1] + (present_key_value,)

        return outputs

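    # Bloom-style caches store keys as [batch_size * num_kv_heads, head_dim, seq_len] and values as
    # [batch_size * num_kv_heads, seq_len, head_dim]; the two helpers below permute between this
    # layout and Falcon's native one.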
    def _reorder_cache_from_bloom_to_falcon(self, key_value: KVCache) -> KVCache:
        key_states, value_states = key_value

        key_states = key_states.permute(0, 2, 1)
        assert key_states.shape == value_states.shape  # Both are [batch_size * num_kv_heads, seq_len, head_dim]

        if self.config.new_decoder_architecture:
            key_states = self._expand_states(key_states)
            value_states = self._expand_states(value_states)
        return (key_states, value_states)

    def _reorder_cache_from_falcon_to_bloom(self, key_value: KVCache) -> KVCache:
        key_states, value_states = key_value

        if self.config.new_decoder_architecture:
            key_states = self._collapse_states(key_states)
            value_states = self._collapse_states(value_states)

        assert key_states.shape == value_states.shape  # Both are [batch_size * num_kv_heads, seq_len, head_dim]
        key_states = key_states.permute(0, 2, 1)
        return (key_states, value_states)

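    # Under Falcon's new decoder architecture (grouped-query attention), the native cache holds one
    # entry per attention head while the Bloom-style cache holds one per KV head: _expand_states
    # repeats each KV head across its query group, and _collapse_states keeps one copy per group.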
    def _expand_states(self, state: torch.Tensor) -> torch.Tensor:
        batch_size_x_num_kv_heads, seq_len, head_dim = state.shape
        batch_size = batch_size_x_num_kv_heads // self.config.num_kv_heads

        state = state.view(batch_size, self.config.num_kv_heads, 1, seq_len, head_dim)
        state = state.expand(-1, -1, self.config.num_key_value_groups, -1, -1)  # No copy
        state = state.reshape(batch_size * self.config.num_attention_heads, seq_len, head_dim)  # Involves a copy
        return state

    def _collapse_states(self, state: torch.Tensor) -> torch.Tensor:
        batch_size_x_num_attn_heads, seq_len, head_dim = state.shape
        batch_size = batch_size_x_num_attn_heads // self.config.num_attention_heads

        state = state.view(batch_size, self.config.num_kv_heads, self.config.num_key_value_groups, seq_len, head_dim)
        state = state[:, :, 0]
        state = state.view(batch_size * self.config.num_kv_heads, seq_len, head_dim)
        return state


class UnoptimizedWrappedLlamaBlock(LlamaDecoderLayer):
    def forward(
        self,
        hidden_states: torch.Tensor,
        *args,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        batch_size, seq_length, _ = hidden_states.shape

        seq_length_with_past = seq_length
        past_key_values_length = 0

        past_key_value = layer_past
        if past_key_value is not None:
            past_key_values_length = past_key_value[0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
            past_key_value = self._reorder_cache_from_bloom_to_llama(past_key_value, batch_size, past_key_values_length)
        elif use_cache:
            past_key_value = DynamicCache()

        if position_ids is None:
            device = hidden_states.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=hidden_states.device
            )
        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length
        )

        outputs = super().forward(
            hidden_states,
            *args,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            use_cache=use_cache,
            **kwargs,
        )

        if use_cache:
            present_key_value = outputs[-1]
            present_key_value = self._reorder_cache_from_llama_to_bloom(
                present_key_value, batch_size, seq_length_with_past
            )
            outputs = outputs[:-1] + (present_key_value,)

        return outputs

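    # The helpers below convert between the Bloom-style (key, value) tuple and the DynamicCache
    # object that recent versions of transformers' LlamaDecoderLayer expect.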
    def _reorder_cache_from_bloom_to_llama(
        self, key_value: Tuple[torch.Tensor], batch_size: int, seq_length: int
    ) -> DynamicCache:
        key_states, value_states = key_value
        key_states = key_states.permute(0, 2, 1)
        key_states = key_states.view(
            batch_size, self.self_attn.num_key_value_heads, seq_length, self.self_attn.head_dim
        )
        value_states = value_states.view(*key_states.shape)
        past_key_values = ((key_states, value_states),)
        return DynamicCache.from_legacy_cache(past_key_values)

    def _reorder_cache_from_llama_to_bloom(
        self, key_value: DynamicCache, batch_size: int, seq_length: int
    ) -> Tuple[torch.Tensor]:
        key_states, value_states = key_value.to_legacy_cache()[0]
        value_states = value_states.view(
            batch_size * self.self_attn.num_key_value_heads, seq_length, self.self_attn.head_dim
        )
        key_states = key_states.view(*value_states.shape)
        key_states = key_states.permute(0, 2, 1)
        return (key_states, value_states)


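# The test runs an optimized Petals block and its unoptimized wrapper on identical weights through
# one 10-token prefill step and three single-token decoding steps, checking that hidden states and
# KV caches agree within atol=1e-6.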
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.forked
def test_optimized_block(device):
    if device == "cuda:0" and not torch.cuda.is_available():
        pytest.skip("CUDA tests can be run only in CUDA-enabled setups")

    config = AutoDistributedConfig.from_pretrained(MODEL_NAME)

    tensor_parallel_devices = (device,)
    dtype = torch.bfloat16
    quant_type = QuantType.NONE

    block_idx = 1
    block = get_model_block(config, layer_idx=block_idx).to(dtype)
    block = convert_block(block, block_idx, config, tensor_parallel_devices, device, quant_type=quant_type, freeze=True)

    if config.model_type == "falcon":
        unopt_block = UnoptimizedWrappedFalconBlock(config).to(dtype)
    elif config.model_type == "llama":
        unopt_block = UnoptimizedWrappedLlamaBlock(config, layer_idx=0).to(dtype)
    else:
        pytest.skip(f"This test is not applicable to {config.model_type} models")

    unopt_block = convert_block(
        unopt_block, block_idx, config, tensor_parallel_devices, device, quant_type=quant_type, freeze=True
    )
    unopt_block.load_state_dict(block.state_dict())

    cache = unopt_cache = None
    with torch.inference_mode():
        for length in [10, 1, 1, 1]:
            dummy_input = torch.randn(1, length, config.hidden_size, device=device, dtype=dtype)
            block_output, cache = block(dummy_input, layer_past=cache, use_cache=True)
            unopt_block_output, unopt_cache = unopt_block(dummy_input, layer_past=unopt_cache, use_cache=True)
            assert torch.allclose(block_output, unopt_block_output, atol=1e-6, rtol=0), length
            assert torch.allclose(cache[0], unopt_cache[0], atol=1e-6, rtol=0), length
            assert torch.allclose(cache[1], unopt_cache[1], atol=1e-6, rtol=0), length