leave a todo for attention mask

justheuristic · 3 years ago
Parent commit: ed468af8d6
1 file changed with 2 additions and 3 deletions

src/server/backend.py (+2 / -3)

@@ -28,7 +28,7 @@ class TransformerBackend(ModuleBackend):
     def inference_step(self, cache_metadata: torch.IntTensor, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
         attention_cache_handle = int(cache_metadata[0, 0].item())
         prefix_length = int(cache_metadata[0, 1].item())
-        hidden_states, *_ = inputs
+        hidden_states, *_ = inputs  # todo: this ignores any extra inputs for now; in future, it would be best to support attention mask as an extra input
         assert hidden_states.ndim == 3, "expected hidden states to be 3-dimensional: [batch_size, seq_len, hid_size]"
 
         with self.memory_cache.use_cache(attention_cache_handle) as cache:
@@ -38,7 +38,6 @@ class TransformerBackend(ModuleBackend):
             print(past_k.shape, past_v.shape)
             hidden_states, (new_k, new_v) = self.module.forward(hidden_states, layer_past=layer_past, use_cache=True)
 
-
             # todo remove these debugprints
             new_length = new_v.shape[1]
             assert new_length > prefix_length
@@ -47,7 +46,7 @@ class TransformerBackend(ModuleBackend):
             assert new_k.shape[2:] == past_k.shape[2:] and new_v.shape[2:] == past_v.shape[2:]
             assert torch.allclose(new_v[:, :past_v.shape[1]], past_v)
             assert torch.allclose(new_k[:, :past_k.shape[1]], past_k)
-            cache[0, :, prefix_length: new_length, :] = new_k[:, prefix_length : new_length]
+            cache[0, :, prefix_length: new_length, :] = new_k[:, prefix_length: new_length]
             cache[1, :, prefix_length: new_length, :] = new_v[:, prefix_length: new_length]
             return (hidden_states,)
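
Below is a minimal standalone sketch of the cache update performed in inference_step, assuming a preallocated key/value cache of shape [2, batch_size, max_length, hid_size] and toy tensors (the shapes and values here are hypothetical; the real layout comes from the server's MemoryCache and the wrapped module). It only illustrates the slice arithmetic; the attention mask mentioned in the new todo is not handled.

    import torch

    # hypothetical sizes; the real ones come from the served transformer block
    batch_size, max_length, hid_size = 1, 16, 8

    # cache_metadata packs [attention_cache_handle, prefix_length] per request,
    # mirroring how inference_step reads cache_metadata[0, 0] and [0, 1]
    cache_metadata = torch.tensor([[0, 4]], dtype=torch.int32)
    prefix_length = int(cache_metadata[0, 1].item())

    # stand-in for the tensor obtained via self.memory_cache.use_cache(handle):
    # index 0 stores keys, index 1 stores values
    cache = torch.zeros(2, batch_size, max_length, hid_size)

    # pretend the module returned keys/values covering the prefix plus 2 new tokens
    new_length = prefix_length + 2
    new_k = torch.randn(batch_size, new_length, hid_size)
    new_v = torch.randn(batch_size, new_length, hid_size)
    assert new_length > prefix_length

    # write only the newly computed positions, leaving the cached prefix untouched
    cache[0, :, prefix_length:new_length, :] = new_k[:, prefix_length:new_length]
    cache[1, :, prefix_length:new_length, :] = new_v[:, prefix_length:new_length]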