@@ -1,9 +1,9 @@
 """Code for serving bloom blocks via hivemind-server"""
 from queue import Empty
-from typing import Sequence, Tuple
+from typing import Sequence, Tuple, Dict, Any
 
 import torch
-from hivemind import use_hivemind_log_handler
+from hivemind import use_hivemind_log_handler, BatchTensorDescriptor
 from hivemind.moe.server.module_backend import ModuleBackend
 from hivemind.moe.server.task_pool import TaskPool
 from hivemind.utils import InvalidStateError, get_logger
@@ -56,19 +56,21 @@ class TransformerBackend(ModuleBackend):
         self.inference_pool = InferenceTaskPool(
             self.inference_step, max_batch_size=self.forward_pool.max_batch_size, name=f"{self.name}_inference"
         )
+        self.inference_schema = (self.args_schema, self.kwargs_schema, BatchTensorDescriptor((), dtype=torch.int64))
 
     def inference_step(self, cache_metadata: torch.IntTensor, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
         with torch.inference_mode():
             attention_cache_handle = int(cache_metadata[0, 0].item())
             prefix_length = int(cache_metadata[0, 1].item())
-            hidden_states = inputs[0]  # todo: in future, it would be best to support attention mask here
+            hidden_states, hypo_ids = inputs  # todo: in future, it would be best to support attention mask here
             assert (
                 hidden_states.ndim == 3
             ), "expected hidden states to be 3-dimensional: [batch_size, seq_len, hid_size]"
 
             with self.memory_cache.use_cache(attention_cache_handle) as cache:
                 assert isinstance(self.module, BloomBlock) and cache.shape[0] == 2 and cache.ndim == 5
-                layer_past = past_k, past_v = cache[0, :, :prefix_length], cache[1, :, :prefix_length]
+                arange = torch.arange(prefix_length)  # positions of the cached prefix to read
+                layer_past = past_k, past_v = cache[0, hypo_ids[:, None], arange], cache[1, hypo_ids[:, None], arange]
print("METADATA:", cache_metadata, past_k.shape, past_v.shape)
|
|
|
hidden_states, (new_k, new_v) = self.module.forward(
|
|
|
hidden_states, layer_past=layer_past, use_cache=True
|
|
@@ -88,3 +90,7 @@ class TransformerBackend(ModuleBackend):
|
|
|
|
|
|
def get_pools(self) -> Sequence[TaskPool]:
|
|
|
return self.forward_pool, self.backward_pool, self.inference_pool
|
|
|
+
|
|
|
+ def get_info(self) -> Dict[str, Any]:
|
|
|
+ """Get expert parameters and stats. Used by RemoteExpert to check shapes and for DMoE orchestration."""
|
|
|
+ return dict(super().get_info(), inference_schema=self.inference_schema)
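
Below is a minimal usage sketch (not part of the diff) of how a caller might consume the new inference_schema exposed through get_info(); the backend handle, tensor sizes, and the identity reordering are assumptions for illustration only.

# Hypothetical client-side sketch: read the exported inference schema and
# build the extra hypo_ids input for inference_step. "backend" and all
# sizes below are assumptions, not taken from the PR.
import torch

info = backend.get_info()  # now also contains "inference_schema"
args_schema, kwargs_schema, hypo_ids_descr = info["inference_schema"]

batch_size, seq_len, hid_size = 2, 1, 14336  # toy dimensions
hidden_states = torch.zeros(batch_size, seq_len, hid_size)
hypo_ids = torch.arange(batch_size, dtype=hypo_ids_descr.dtype)  # identity reordering
# inference_step(cache_metadata, hidden_states, hypo_ids) then selects the cached
# key/value prefix rows by hypo_ids before running the block with layer_past.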
|