@@ -55,13 +55,20 @@ class TransformerBackend(ModuleBackend):
             self.inference_step, max_batch_size=self.forward_pool.max_batch_size, name=f"{self.name}_inference"
         )
         self.dtype = backend_dtype if backend_dtype else self.module.input_layernorm.weight.dtype
-        self.inference_schema = (self.args_schema, self.kwargs_schema, BatchTensorDescriptor((), dtype=torch.int64))
+        self.inference_schema = (
+            (
+                *self.args_schema,
+                BatchTensorDescriptor((), dtype=self.dtype),
+                BatchTensorDescriptor((), dtype=torch.int64),
+            ),
+            self.kwargs_schema,
+        )
 
     def inference_step(self, cache_metadata: torch.IntTensor, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
         with torch.inference_mode():
             attention_cache_handle = int(cache_metadata[0, 0].item())
             prefix_length = int(cache_metadata[0, 1].item())
-            hidden_states, hypo_ids = inputs  # todo: in future, it would be best to support attention mask here
+            hidden_states, prompts, hypo_ids = inputs  # todo: in future, it would be best to support attention mask here
             assert (
                 hidden_states.ndim == 3
             ), "expected hidden states to be 3-dimensional: [batch_size, seq_len, hid_size]"
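Note: the unpacking order above must match the flattened schema — hidden_states from
args_schema first, then the float prompts tensor (dtype=self.dtype), then int64 hypo_ids.
For reference, a minimal sketch of the input tuple a client would send under the new
nested schema; the sizes, dtype stand-in, and pre_seq_len name below are assumptions
for illustration, not part of the patch:

    import torch

    batch_size, pre_seq_len, seq_len, hid_size = 1, 4, 8, 14336  # illustrative sizes
    dtype = torch.bfloat16  # stands in for self.dtype

    hidden_states = torch.randn(batch_size, seq_len, hid_size, dtype=dtype)
    prompts = torch.randn(batch_size, pre_seq_len, hid_size, dtype=dtype)  # BatchTensorDescriptor((), dtype=self.dtype)
    hypo_ids = torch.arange(batch_size, dtype=torch.int64)  # BatchTensorDescriptor((), dtype=torch.int64)

    inputs = (hidden_states, prompts, hypo_ids)  # unpacked positionally by inference_step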
@@ -72,7 +79,7 @@ class TransformerBackend(ModuleBackend):
             layer_past = past_k, past_v = cache[0, hypo_ids, arange], cache[1, hypo_ids, arange]
             print("METADATA:", cache_metadata, past_k.shape, past_v.shape)
             hidden_states, (new_k, new_v) = self.module.forward(
-                hidden_states, layer_past=layer_past, use_cache=True
+                hidden_states, layer_past=layer_past, use_cache=True, prompts=prompts
             )
 
             # todo remove these asserts once we pass all tests
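The new prompts=prompts kwarg assumes the wrapped block's forward was extended in
another part of this change, which is outside these hunks. A hypothetical sketch of
what that signature could look like — the class name and the prompt-addition logic
are assumptions, not the actual implementation:

    from typing import Optional, Tuple
    import torch

    class BloomBlockWithPrompts:  # hypothetical wrapper; the real change lives elsewhere in this PR
        def forward(
            self,
            hidden_states: torch.Tensor,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            prompts: Optional[torch.Tensor] = None,
        ):
            if prompts is not None:
                # hypothetical handling: add prompt embeddings to the first pre_seq_len positions
                pre_seq_len = prompts.shape[1]
                hidden_states = hidden_states.clone()  # avoid mutating the caller's tensor
                hidden_states[:, :pre_seq_len] += prompts
            ...  # original block computation; returns (hidden_states, (new_k, new_v)) when use_cache=True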