test_optimized_layers.py

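# Reference implementation used to test Petals' optimized Falcon block: the wrapper
# below reproduces a stock transformers FalconDecoderLayer while converting KV caches
# to and from the Bloom-style layout, and test_falcon checks that its outputs match
# the optimized block during prefill and incremental decoding.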
from typing import Optional, Tuple

import pytest
import torch
from transformers.models.falcon.modeling_falcon import FalconDecoderLayer, FalconModel, build_alibi_tensor

from petals.utils.auto_config import AutoDistributedConfig
from petals.utils.convert_block import QuantType, convert_block
from test_utils import MODEL_NAME

KVCache = Tuple[torch.Tensor, torch.Tensor]

class UnoptimizedWrappedFalconBlock(FalconDecoderLayer):
    def forward(
        self,
        hidden_states: torch.Tensor,
        *args,
        attention_mask: Optional[torch.Tensor] = None,
        alibi: Optional[torch.Tensor] = None,
        layer_past: Optional[KVCache] = None,
        use_cache: bool = False,
        **kwargs,
    ):
        batch_size, seq_length = hidden_states.shape[:2]

        if layer_past is not None:
            layer_past = self._reorder_cache_from_bloom_to_falcon(layer_past)
        past_length = 0 if layer_past is None else layer_past[0].shape[1]
        seq_length_with_past = seq_length + past_length

        attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
        if alibi is None and self.config.alibi:
            alibi = build_alibi_tensor(attention_mask, num_heads=self.num_heads, dtype=hidden_states.dtype)
        attention_mask = FalconModel._prepare_attn_mask(attention_mask, (batch_size, seq_length), past_length)

        outputs = super().forward(
            hidden_states,
            *args,
            attention_mask=attention_mask,
            alibi=alibi,
            layer_past=layer_past,
            use_cache=use_cache,
            **kwargs,
        )

        if use_cache:
            present_key_value = outputs[-1]
            present_key_value = self._reorder_cache_from_falcon_to_bloom(present_key_value)
            outputs = outputs[:-1] + (present_key_value,)

        return outputs
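
    # The helpers below convert KV caches between the Bloom-style layout used by
    # this wrapper's callers (keys stored as [batch_size * num_kv_heads, head_dim, seq_len])
    # and the layout FalconDecoderLayer expects, expanding or collapsing grouped
    # KV heads when the new decoder architecture is used.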
    def _reorder_cache_from_bloom_to_falcon(self, key_value: KVCache) -> KVCache:
        key_states, value_states = key_value

        key_states = key_states.permute(0, 2, 1)
        assert key_states.shape == value_states.shape  # Both are [batch_size * num_kv_heads, seq_len, head_dim]

        if self.config.new_decoder_architecture:
            key_states = self._expand_states(key_states)
            value_states = self._expand_states(value_states)
        return (key_states, value_states)

    def _reorder_cache_from_falcon_to_bloom(self, key_value: KVCache) -> KVCache:
        key_states, value_states = key_value

        if self.config.new_decoder_architecture:
            key_states = self._collapse_states(key_states)
            value_states = self._collapse_states(value_states)

        assert key_states.shape == value_states.shape  # Both are [batch_size * num_kv_heads, seq_len, head_dim]
        key_states = key_states.permute(0, 2, 1)

        return (key_states, value_states)

    def _expand_states(self, state: torch.Tensor) -> torch.Tensor:
        batch_size_x_num_kv_heads, seq_len, head_dim = state.shape
        batch_size = batch_size_x_num_kv_heads // self.config.num_kv_heads

        state = state.view(batch_size, self.config.num_kv_heads, 1, seq_len, head_dim)
        state = state.expand(-1, -1, self.config.num_key_value_groups, -1, -1)  # No copy
        state = state.reshape(batch_size * self.config.num_attention_heads, seq_len, head_dim)  # Involves a copy
        return state
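
    # Inverse of _expand_states: since _expand_states repeats each KV head
    # num_key_value_groups times, keeping the first copy of every group restores
    # the original [batch_size * num_kv_heads, seq_len, head_dim] tensor.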
    def _collapse_states(self, state: torch.Tensor) -> torch.Tensor:
        batch_size_x_num_attn_heads, seq_len, head_dim = state.shape
        batch_size = batch_size_x_num_attn_heads // self.config.num_attention_heads

        state = state.view(batch_size, self.config.num_kv_heads, self.config.num_key_value_groups, seq_len, head_dim)
        state = state[:, :, 0]
        state = state.view(batch_size * self.config.num_kv_heads, seq_len, head_dim)
        return state
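
# test_falcon feeds identical inputs through the optimized block and the unoptimized
# wrapper (a 10-token prefill followed by three single-token decoding steps) and
# asserts that hidden states and KV caches match within a small absolute tolerance.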
@pytest.mark.skipif("falcon" not in MODEL_NAME, reason="This test is applicable only to Falcon models")
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.forked
def test_falcon(device):
    if device == "cuda:0" and not torch.cuda.is_available():
        pytest.skip("CUDA tests can be run only in CUDA-enabled setups")

    config = AutoDistributedConfig.from_pretrained(MODEL_NAME)

    tensor_parallel_devices = (device,)
    dtype = torch.bfloat16
    quant_type = QuantType.NONE

    block = config.block_class(config).to(dtype)
    block = convert_block(block, 0, config, tensor_parallel_devices, device, quant_type=quant_type, freeze=True)

    unopt_block = UnoptimizedWrappedFalconBlock(config).to(dtype)
    unopt_block = convert_block(
        unopt_block, 0, config, tensor_parallel_devices, device, quant_type=quant_type, freeze=True
    )

    unopt_block.load_state_dict(block.state_dict())
    cache = unopt_cache = None

    with torch.inference_mode():
        for length in [10, 1, 1, 1]:
            dummy_input = torch.randn(1, length, config.hidden_size, device=device, dtype=dtype)
            block_output, cache = block(dummy_input, layer_past=cache, use_cache=True)
            unopt_block_output, unopt_cache = unopt_block(dummy_input, layer_past=unopt_cache, use_cache=True)
            assert torch.allclose(block_output, unopt_block_output, atol=1e-6, rtol=0), length
            assert torch.allclose(cache[0], unopt_cache[0], atol=1e-6, rtol=0), length
            assert torch.allclose(cache[1], unopt_cache[1], atol=1e-6, rtol=0), length
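
# A hypothetical way to run only this test, assuming test_utils resolves MODEL_NAME
# from the MODEL_NAME environment variable (check test_utils for the actual mechanism):
#   MODEL_NAME=tiiuae/falcon-rw-1b pytest test_optimized_layers.py -k test_falcon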