bert_sparse_self_attention.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from torch import nn

from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig


class BertSparseSelfAttention(nn.Module):
    """Implements the sparse self-attention layer of a BERT model, based on
    https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373

    For more information, see TODO: DeepSpeed Sparse Transformer.
    For a usage example, see TODO: DeepSpeed Sparse Transformer Tutorial.
    """

    def __init__(
            self,
            config,
            # SparsityConfig parameters need to be set accordingly
            sparsity_config=FixedSparsityConfig(num_heads=4)):
        """Initialize the BERT sparse self-attention layer.

        Note: you can use any of the provided sparsity configs or simply add your own!

        Arguments:
            config: required: BERT model config
            sparsity_config: optional: determines the sparsity pattern configuration; based on the FixedSparsityConfig class.
        """
        super(BertSparseSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
                             "heads (%d)" % (config.hidden_size, config.num_attention_heads))

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Dense projections for query, key and value
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        # Block-sparse attention kernel driven by the given sparsity config
        self.sparse_self_attention = SparseSelfAttention(sparsity_config)
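
    # Illustrative note (not part of the original file): the default sparsity
    # pattern above can be replaced by constructing the layer with a customized
    # config, e.g.
    #
    #     sparsity_config = FixedSparsityConfig(num_heads=config.num_attention_heads)
    #     attention = BertSparseSelfAttention(config, sparsity_config=sparsity_config)
    #
    # FixedSparsityConfig accepts further knobs (block size, number of local and
    # global blocks, ...) whose exact names depend on the installed DeepSpeed
    # version; other SparsityConfig subclasses can be substituted the same way.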

    def transpose_for_scores(self, x):
        # Reshape (batch, seq_len, all_head_size) into
        # (batch, num_heads, seq_len, head_size) for the attention kernel
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        """Applies the forward pass of BERT sparse self-attention.

        Arguments:
            hidden_states: required: hidden_states tensor of the BERT model
            attention_mask: required: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported

        Return:
            context_layer: a dense tensor containing the attention context
        """
        # Project the hidden states to query, key and value
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        # Split into per-head representations
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Block-sparse attention; the mask is forwarded as key_padding_mask
        context_layer = self.sparse_self_attention(query_layer,
                                                   key_layer,
                                                   value_layer,
                                                   key_padding_mask=attention_mask)

        # Merge heads back into a single hidden dimension
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer
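

# Usage sketch (illustrative, not part of the original module): a minimal way to
# construct and call the layer. The SimpleNamespace config is a hypothetical
# stand-in for a real BERT config exposing `hidden_size` and
# `num_attention_heads`; running it requires a CUDA build of DeepSpeed with
# sparse attention support, and the exact mask shape/semantics follow what
# SparseSelfAttention expects for `key_padding_mask`.
if __name__ == "__main__":
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=256, num_attention_heads=4)
    layer = BertSparseSelfAttention(config).cuda().half()

    # Sequence length is assumed to be compatible with the sparsity block size.
    batch_size, seq_len = 2, 512
    hidden_states = torch.randn(batch_size, seq_len, config.hidden_size, dtype=torch.half, device="cuda")
    attention_mask = torch.zeros(batch_size, seq_len, dtype=torch.half, device="cuda")

    context_layer = layer(hidden_states, attention_mask)
    print(context_layer.shape)  # expected: (batch_size, seq_len, hidden_size)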