block.py

  1. """
  2. Bloom intermediate layer
  3. Based on https://github.com/huggingface/transformers/commit/ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b
  4. See commit history for authorship.
  5. """
  6. import math
  7. import torch
  8. import torch.nn as nn
  9. import torch.nn.quantized.dynamic.modules.linear
  10. from src.bloom.ops import (
  11. BloomGelu,
  12. BloomScaledSoftmax,
  13. attention_mask_func,
  14. build_alibi_tensor,
  15. dropout_add,
  16. pre_process_alibi_for_pad,
  17. split_tensor_along_last_dim,
  18. )


class BloomAttention(nn.Module):
    def __init__(self, config, layer_number=None):
        super().__init__()

        self.hidden_size = config.hidden_size
        self.num_heads = config.n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.split_size = self.hidden_size
        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
        self.masked_softmax_fusion = config.masked_softmax_fusion
        self.hidden_dropout = config.hidden_dropout

        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(
                f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
                f" {self.num_heads})."
            )

        # Layer-wise attention scaling
        self.layer_number = max(1, layer_number)
        self.norm_factor = math.sqrt(self.head_dim) * self.layer_number

        # Scaled Softmax
        self.scale_mask_softmax = BloomScaledSoftmax(
            self.masked_softmax_fusion,
            attention_mask_func,
            self.attention_softmax_in_fp32,
            self.layer_number,
        )

        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.attention_dropout = nn.Dropout(config.attention_dropout)
    def forward(
        self,
        hidden_states,
        residual,
        layer_past=None,
        attention_mask=None,
        alibi=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
        DEBUG_INPLACE_PAST: bool = True,
    ):
        if DEBUG_INPLACE_PAST:
            past_key, past_value, past_length = layer_past
            current_sequence_length = hidden_states.shape[1] + past_length
        else:
            current_sequence_length = hidden_states.shape[1] + (0 if layer_past is None else layer_past[0].shape[1])

        if alibi is None:
            alibi = build_alibi_tensor(
                current_sequence_length, n_head=self.num_heads, dtype=hidden_states.dtype, device=hidden_states.device
            )

        # hidden_states: [batch_size, seq_length, hidden_size]
        # apply preprocessing if the input is padded
        if attention_mask is not None:
            alibi = pre_process_alibi_for_pad(alibi, attention_mask)
        # otherwise repeat alibi tensor with the batch size
        else:
            alibi = alibi.repeat(hidden_states.shape[0], 1, 1)

        mixed_x_layer = self.query_key_value(hidden_states)

        # [batch_size, seq_length, 3 x hidden_size] --> [batch_size, seq_length, num_heads, 3 x head_dim]
        new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_heads, 3 * self.head_dim)
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

        # [batch_size, seq_length, num_heads, 3 x head_dim] --> 3 [batch_size, seq_length, num_heads, head_dim]
        (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)

        if DEBUG_INPLACE_PAST:
            # write the new keys/values into the preallocated cache buffers in place,
            # then read back the full prefix up to the current sequence length
            past_key, past_value, past_length = layer_past
            assert past_key.dtype == key_layer.dtype
            assert past_key.shape[1] == 2048
            assert not torch.is_grad_enabled()
            past_key[:, past_length : past_length + key_layer.shape[1]] = key_layer.type_as(past_key)
            past_value[:, past_length : past_length + value_layer.shape[1]] = value_layer.type_as(past_value)

            key_layer = past_key[:, :current_sequence_length, ...]
            value_layer = past_value[:, :current_sequence_length, ...]
        elif layer_past is not None:
            assert False, "TODO ENABLE INPLACE"
            past_key, past_value = layer_past
            key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=1)
            value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=1)

        if use_cache is True:
            present = (key_layer, value_layer)
        else:
            present = None
        # [batch_size, num_heads, q_length, k_length]
        output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1))

        # [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim]
        query_layer = query_layer.transpose(1, 0).reshape(output_size[2], output_size[0] * output_size[1], -1)

        # [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim]
        key_layer = key_layer.transpose(1, 0).reshape(output_size[3], output_size[0] * output_size[1], -1)

        # Raw attention scores. [batch_size * num_heads, q_length, k_length]
        beta = 1.0 / self.layer_number

        # baddbmm fuses the alibi bias into the matmul: beta * alibi + alpha * (Q @ K^T)
        matmul_result = torch.baddbmm(
            alibi,
            query_layer.transpose(1, 0),
            key_layer.transpose(1, 0).transpose(1, 2),
            beta=beta,
            alpha=(1.0 / self.norm_factor),
        )

        # change view to [batch_size, num_heads, q_length, k_length]
        attention_scores = matmul_result.view(*output_size)

        # attention scores and attention mask [b, np, sq, sk]
        max_positions = max(attention_scores.shape[-1], attention_scores.shape[-2])
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask, max_positions).to(value_layer.dtype)
        attention_probs = self.attention_dropout(attention_probs)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        # context layer shape: [batch_size, num_heads, q_length, head_dim]
        output_size = (value_layer.size(0), value_layer.size(2), query_layer.size(0), value_layer.size(3))

        # change view [k_length, batch_size x num_heads, head_dim]
        value_layer = value_layer.transpose(1, 0).reshape(value_layer.size(1), output_size[0] * output_size[1], -1)

        # change view [batch_size x num_heads, q_length, k_length]
        attention_probs_reshaped = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

        # matmul: [batch_size * num_heads, q_length, head_dim]
        context_layer = torch.bmm(attention_probs_reshaped, value_layer.transpose(0, 1))

        # change view [batch_size, num_heads, q_length, head_dim]
        context_layer = context_layer.view(*output_size)

        # [batch_size, num_heads, q_length, head_dim] --> [q_length, batch_size, num_heads, head_dim]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [q_length, batch_size, num_heads, head_dim] --> [q_length, batch_size, hidden_size]
        new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        # Output. [q_length, batch_size, hidden_size]
        # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232
        output_tensor = self.dense(context_layer)
        output = output_tensor.transpose(1, 0)

        output = dropout_add(output, residual, self.hidden_dropout, self.training)

        outputs = (output, present)
        if output_attentions:
            outputs += (attention_probs,)

        return outputs
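

# --- Illustrative usage sketch (not part of the original module) ---
# The in-place cache path above (DEBUG_INPLACE_PAST=True) expects `layer_past` to be a tuple of
# (past_key, past_value, past_length), where both buffers are preallocated for the full 2048-token
# context in the projected layout [batch_size, 2048, num_heads, head_dim], and the call must happen
# under torch.no_grad(). Everything below is a hypothetical sketch: the function name and the
# SimpleNamespace config values are assumptions chosen to match exactly the attributes this module
# reads; a real BLOOM config would come from `transformers`.
def _example_attention_with_inplace_cache():
    from types import SimpleNamespace

    config = SimpleNamespace(
        hidden_size=64,
        n_head=8,
        attention_softmax_in_fp32=True,
        masked_softmax_fusion=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
    )
    attn = BloomAttention(config, layer_number=1).eval()

    batch_size, seq_length = 2, 16
    head_dim = config.hidden_size // config.n_head
    hidden_states = torch.randn(batch_size, seq_length, config.hidden_size)
    residual = hidden_states

    # Preallocated key/value buffers for the maximum context length asserted above (2048).
    past_key = torch.zeros(batch_size, 2048, config.n_head, head_dim)
    past_value = torch.zeros(batch_size, 2048, config.n_head, head_dim)
    past_length = 0

    with torch.no_grad():  # required: the in-place branch asserts that grad is disabled
        output, present = attn(
            hidden_states,
            residual,
            layer_past=(past_key, past_value, past_length),
            attention_mask=None,  # a padded batch would pass a [batch_size, seq_length] mask instead
            use_cache=True,
        )
    return output, present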


class BloomMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.dense_h_to_4h = nn.Linear(self.hidden_size, 4 * self.hidden_size)
        self.dense_4h_to_h = nn.Linear(4 * self.hidden_size, self.hidden_size)
        self.hidden_dropout = config.hidden_dropout
        self.gelu_impl = BloomGelu()

    def forward(self, hidden_states, residual):
        hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))
        intermediate_output = self.dense_4h_to_h(hidden_states)
        output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training)
        return output
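

# --- Illustrative usage sketch (not part of the original module) ---
# BloomMLP expands the hidden size fourfold, applies BLOOM's GELU, projects back, and fuses the
# residual into the dropout via `dropout_add`. The function name and config values below are
# hypothetical assumptions for illustration only.
def _example_mlp_forward():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=64, hidden_dropout=0.0)
    mlp = BloomMLP(config).eval()

    hidden_states = torch.randn(2, 16, config.hidden_size)  # e.g. post_attention_layernorm output
    residual = hidden_states                                 # residual stream entering the MLP
    output = mlp(hidden_states, residual)                    # shape: [2, 16, config.hidden_size]
    return output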


class BloomBlock(nn.Module):
    def __init__(self, config, layer_number=None):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)
        self.n_head = config.n_head
        self.self_attention = BloomAttention(config, layer_number=layer_number)
        self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = BloomMLP(config)

        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
        self.hidden_dropout = config.hidden_dropout

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
        alibi=None,
        DEBUG_INPLACE_PAST=True,
    ):
        # hidden_states: [batch_size, seq_length, hidden_size]

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)

        # Layer norm post the self attention.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        # Self attention.
        attn_outputs = self.self_attention(
            layernorm_output,
            residual,
            layer_past=layer_past,
            attention_mask=attention_mask,
            alibi=alibi,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            DEBUG_INPLACE_PAST=DEBUG_INPLACE_PAST,
        )

        attention_output = attn_outputs[0]
        outputs = attn_outputs[1:]

        layernorm_output = self.post_attention_layernorm(attention_output)

        # Get residual
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = attention_output

        # MLP.
        output = self.mlp(layernorm_output, residual)

        if use_cache:
            outputs = (output,) + outputs
        else:
            outputs = (output,) + outputs[1:]

        return outputs  # hidden_states, present, attentions
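

# --- Illustrative usage sketch (not part of the original module) ---
# A BloomBlock chains pre-attention LayerNorm, BloomAttention, post-attention LayerNorm, and
# BloomMLP, and forwards `layer_past` straight to the attention layer, so the same preallocated
# (past_key, past_value, past_length) tuple is required here as well. The function name and config
# values are hypothetical assumptions covering exactly the attributes read by this module.
def _example_block_forward():
    from types import SimpleNamespace

    config = SimpleNamespace(
        hidden_size=64,
        n_head=8,
        layer_norm_epsilon=1e-5,
        apply_residual_connection_post_layernorm=False,
        attention_softmax_in_fp32=True,
        masked_softmax_fusion=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
    )
    block = BloomBlock(config, layer_number=1).eval()

    batch_size, seq_length = 2, 16
    head_dim = config.hidden_size // config.n_head
    hidden_states = torch.randn(batch_size, seq_length, config.hidden_size)
    layer_past = (
        torch.zeros(batch_size, 2048, config.n_head, head_dim),  # past_key buffer
        torch.zeros(batch_size, 2048, config.n_head, head_dim),  # past_value buffer
        0,                                                        # past_length
    )

    with torch.no_grad():
        hidden_states, present = block(hidden_states, layer_past=layer_past, use_cache=True)
    return hidden_states, present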