@@ -56,9 +56,10 @@ class TransformerBackend(ModuleBackend):
         self.inference_pool = InferenceTaskPool(
             self.inference_step, max_batch_size=self.forward_pool.max_batch_size, name=f"{self.name}_inference"
         )
-        self.dtype = backend_dtype if backend_dtype else self.module.input_layernorm.weight.dtype
+        self.dtype = backend_dtype if backend_dtype else self.module.input_layernorm.weight.dtype
 
     def inference_step(self, cache_metadata: torch.IntTensor, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
+        print('START INFERENCE STEP')
         with torch.inference_mode():
             attention_cache_handle = int(cache_metadata[0, 0].item())
             prefix_length = int(cache_metadata[0, 1].item())
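
Note on the hunk above: cache_metadata, unpacked at the top of inference_step, is an int tensor whose first row packs the attention-cache handle (column 0) and the current prefix length (column 1). A minimal sketch of the caller-side packing that layout implies, with toy values; the names below are illustrative, not the repo's API:

import torch

# Hypothetical packing on the calling side, mirroring the unpacking in
# inference_step: column 0 holds the memory-cache handle, column 1 the
# number of tokens already written to that cache. Values are toys.
attention_cache_handle, prefix_length = 7, 12
cache_metadata = torch.tensor([[attention_cache_handle, prefix_length]], dtype=torch.int32)

assert int(cache_metadata[0, 0].item()) == attention_cache_handle
assert int(cache_metadata[0, 1].item()) == prefix_length
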
@@ -69,22 +70,23 @@ class TransformerBackend(ModuleBackend):
 
             with self.memory_cache.use_cache(attention_cache_handle) as cache:
                 assert isinstance(self.module, BloomBlock) and cache.shape[0] == 2 and cache.ndim == 5
-                layer_past = past_k, past_v = cache[0, :, :prefix_length], cache[1, :, :prefix_length]
-                print("METADATA:", cache_metadata, past_k.shape, past_v.shape)
+                layer_past = cache[0, ...], cache[1, ...], prefix_length
+
+                print("AAA")
                 hidden_states, (new_k, new_v) = self.module.forward(
-                    hidden_states, layer_past=layer_past, use_cache=True
+                    hidden_states, layer_past=layer_past, use_cache=True, DEBUG_INPLACE_PAST=True,
                 )
-
+                print("BBB")
                 # todo remove these asserts once we pass all tests
                 new_length = new_v.shape[1]
                 assert new_length > prefix_length
-                assert new_k.shape[0] == past_k.shape[0] and new_v.shape[0] == past_v.shape[0]
-                assert new_k.shape[1] == new_length and new_v.shape[1] == new_length
-                assert new_k.shape[2:] == past_k.shape[2:] and new_v.shape[2:] == past_v.shape[2:]
-                assert torch.allclose(new_v[:, : past_v.shape[1]], past_v)
-                assert torch.allclose(new_k[:, : past_k.shape[1]], past_k)
-                cache[0, :, prefix_length:new_length, :] = new_k[:, prefix_length:new_length]
-                cache[1, :, prefix_length:new_length, :] = new_v[:, prefix_length:new_length]
+                # assert new_k.shape[0] == past_k.shape[0] and new_v.shape[0] == past_v.shape[0]
+                # assert new_k.shape[1] == new_length and new_v.shape[1] == new_length
+                # assert new_k.shape[2:] == past_k.shape[2:] and new_v.shape[2:] == past_v.shape[2:]
+                # assert torch.allclose(new_v[:, : past_v.shape[1]], past_v)
+                # assert torch.allclose(new_k[:, : past_k.shape[1]], past_k)
+                # cache[0, :, prefix_length:new_length, :] = new_k[:, prefix_length:new_length]
+                # cache[1, :, prefix_length:new_length, :] = new_v[:, prefix_length:new_length]
                 return (hidden_states,)
 
     def get_pools(self) -> Sequence[TaskPool]:
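
The second hunk replaces the old copy-back scheme (slice past_k/past_v out of the cache, run the block, then write the new keys and values back at [prefix_length:new_length]) with passing full views of the preallocated cache plus prefix_length into the block, presumably so that a modified BloomBlock.forward, gated here by the new DEBUG_INPLACE_PAST=True flag, can write new entries in place. Those block internals are not part of this diff; below is a minimal toy sketch of what such an in-place convention could look like, using 3-D tensors rather than BLOOM's real 5-D cache layout. Every name in it is an illustrative assumption, not the repo's API.

import torch

def attention_with_inplace_past(query, key, value, layer_past):
    """Toy sketch of an in-place KV-cache convention (assumed, simplified).

    layer_past is (k_cache, v_cache, prefix_length): full preallocated cache
    tensors plus the number of tokens already stored. New keys/values are
    written into the cache in place instead of being concatenated, so the
    caller no longer needs the copy-back lines commented out above.
    """
    k_cache, v_cache, prefix_length = layer_past
    new_length = prefix_length + key.shape[1]
    assert new_length <= k_cache.shape[1], "preallocated cache is too short"

    # write the new tokens' keys/values directly into the shared cache
    k_cache[:, prefix_length:new_length] = key
    v_cache[:, prefix_length:new_length] = value

    # attend over everything stored so far
    k, v = k_cache[:, :new_length], v_cache[:, :new_length]
    scores = torch.einsum("bqd,bkd->bqk", query, k) / k.shape[-1] ** 0.5
    attn = scores.softmax(dim=-1)
    # the returned new_k/new_v have shape[1] == new_length, consistent with
    # the "new_length = new_v.shape[1]" check kept in inference_step
    return torch.einsum("bqk,bkd->bqd", attn, v), (k, v)

Under this convention the explicit cache writes at the end of the old inference_step become redundant, which is presumably why the diff comments them out, along with the consistency asserts, rather than deleting them outright ("todo remove these asserts once we pass all tests").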