@@ -1,9 +1,13 @@
 """Code for serving bloom blocks via hivemind-server"""
 from queue import Empty
-from typing import Optional, Sequence, Tuple
+<<<<<<< HEAD
+from typing import Sequence, Tuple, Dict, Any, Optional
+=======
+from typing import Sequence, Tuple, Dict, Any
+>>>>>>> 79a9ff2b2ea0c2601e3670f9a28e84e8a511247d
 
 import torch
-from hivemind import use_hivemind_log_handler
+from hivemind import use_hivemind_log_handler, BatchTensorDescriptor
 from hivemind.moe.server.module_backend import ModuleBackend
 from hivemind.moe.server.task_pool import TaskPool
 from hivemind.utils import InvalidStateError, get_logger
@@ -14,6 +18,34 @@ from src.server.cache import MemoryCache
 use_hivemind_log_handler("in_root_logger")
 logger = get_logger(__file__)
 
+<<<<<<< HEAD
+
+class InferenceTaskPool(TaskPool):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        assert self.min_batch_size == 1, "min_batch_size in InferenceTaskPool cannot be greater than 1"
+
+    def iterate_minibatches(self, *args, **kwargs):
+        """Form minibatches by grouping one or more tasks together up to self.max_batch_size"""
+
+        while True:
+            try:
+                logger.debug(f"{self.name} getting next task")
+                task = self.tasks.get(timeout=self.timeout)
+            except Empty:
+                logger.warning(f"Timeout reached but batch doesn't contain >={self.min_batch_size} elements yet")
+                continue
+
+            try:
+                if task.future.set_running_or_notify_cancel():
+                    yield [task]
+            except InvalidStateError as e:
+                logger.debug(f"Failed to add task to batch: {task.future} raised {e}")
+=======
+MAX_LENGTH = 2048
+>>>>>>> 79a9ff2b2ea0c2601e3670f9a28e84e8a511247d
+
 
 class InferenceTaskPool(TaskPool):
     def __init__(self, *args, **kwargs):
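
Note on the HEAD side of the conflict above: iterate_minibatches never groups tasks, so every batch it yields contains exactly one task whose future is still alive. Below is a minimal standalone sketch of that behaviour using a plain queue.Queue; DummyTask, iterate_singleton_minibatches and the 0.1 s timeout are illustrative stand-ins, not names from the diff or from hivemind.

# Standalone sketch (illustrative names, not from the diff): yield one live task per batch.
import queue
from concurrent.futures import Future
from dataclasses import dataclass, field


@dataclass
class DummyTask:
    args: tuple
    future: Future = field(default_factory=Future)


def iterate_singleton_minibatches(tasks, timeout=0.1):
    """Mirror the loop above: one task per minibatch, skipping cancelled futures."""
    while True:
        try:
            task = tasks.get(timeout=timeout)
        except queue.Empty:
            return  # the real pool would `continue` and keep waiting; we stop so the sketch terminates
        if task.future.set_running_or_notify_cancel():
            yield [task]


pool = queue.Queue()
pool.put(DummyTask(args=("hidden_states",)))
cancelled = DummyTask(args=("stale request",))
cancelled.future.cancel()  # a cancelled future is skipped, just like in iterate_minibatches
pool.put(cancelled)
print([batch[0].args for batch in iterate_singleton_minibatches(pool)])  # [('hidden_states',)]
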
@@ -55,22 +87,31 @@ class TransformerBackend(ModuleBackend):
             self.inference_step, max_batch_size=self.forward_pool.max_batch_size, name=f"{self.name}_inference"
         )
         self.dtype = backend_dtype if backend_dtype else self.module.input_layernorm.weight.dtype
+        self.inference_schema = (
+            (
+                *self.args_schema,
+                BatchTensorDescriptor((), dtype=self.dtype),
+                BatchTensorDescriptor((), dtype=torch.int64),
+            ),
+            self.kwargs_schema,
+        )
 
     def inference_step(self, cache_metadata: torch.IntTensor, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
         with torch.inference_mode():
             attention_cache_handle = int(cache_metadata[0, 0].item())
             prefix_length = int(cache_metadata[0, 1].item())
-            hidden_states = inputs[0]  # todo: in future, it would be best to support attention mask here
+            hidden_states, hypo_ids, prompts = inputs  # todo: in future, it would be best to support attention mask here
             assert (
                 hidden_states.ndim == 3
             ), "expected hidden states to be 3-dimensional: [batch_size, seq_len, hid_size]"
 
             with self.memory_cache.use_cache(attention_cache_handle) as cache:
                 assert isinstance(self.module, BloomBlock) and cache.shape[0] == 2 and cache.ndim == 5
-                layer_past = past_k, past_v = cache[0, :, :prefix_length], cache[1, :, :prefix_length]
+                arange = torch.arange(prefix_length)
+                layer_past = past_k, past_v = cache[0, hypo_ids, arange], cache[1, hypo_ids, arange]
                 print("METADATA:", cache_metadata, past_k.shape, past_v.shape)
                 hidden_states, (new_k, new_v) = self.module.forward(
-                    hidden_states, layer_past=layer_past, use_cache=True
+                    hidden_states, layer_past=layer_past, use_cache=True, prompts=prompts
                 )
 
                 # todo remove these asserts once we pass all tests
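
For context on the inference_step changes above: hypo_ids lets a beam-search client re-map rows of the per-layer attention cache to the hypotheses that survived the latest step. The sketch below illustrates that re-ordering on a toy tensor; the [2, batch, prefix, hid] layout, the slice over the prefix dimension, and all shapes are assumptions for illustration and do not reproduce the diff's exact indexing or the real MemoryCache layout.

# Illustration only: re-ordering a toy [2, batch, prefix, hid] cache by hypo_ids.
import torch

batch, prefix, hid = 3, 4, 2
cache = torch.arange(2 * batch * prefix * hid, dtype=torch.float32).reshape(2, batch, prefix, hid)

# After a beam-search step, slot 0 should continue hypothesis 2, slots 1-2 both continue hypothesis 0.
hypo_ids = torch.tensor([2, 0, 0])
past_k = cache[0, hypo_ids, :prefix]  # keys, rows permuted/duplicated per hypothesis
past_v = cache[1, hypo_ids, :prefix]  # values, same re-ordering

assert past_k.shape == (batch, prefix, hid)
assert torch.equal(past_k[0], cache[0, 2])  # slot 0 now carries hypothesis 2's cached keys
assert torch.equal(past_v[1], cache[1, 0])  # slot 1 carries hypothesis 0's cached values
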
@@ -85,3 +126,7 @@ class TransformerBackend(ModuleBackend):
 
     def get_pools(self) -> Sequence[TaskPool]:
         return self.forward_pool, self.backward_pool, self.inference_pool
+
+    def get_info(self) -> Dict[str, Any]:
+        """Get expert parameters and stats. Used by RemoteExpert to check shapes and for DMoE orchestration."""
+        return dict(super().get_info(), inference_schema=self.inference_schema)
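
Finally, the get_info override exposes the new inference_schema alongside whatever the parent ModuleBackend already reports, using the copy-and-extend idiom dict(base, key=value). The snippet below is a sketch with placeholder values; apart from the inference_schema key, none of the keys or descriptor contents are claimed to match hivemind's real get_info output.

# Placeholder values; only the nested schema structure and the dict-extension idiom mirror the diff.
args_schema = ("<hidden_states descriptor>",)   # stands in for self.args_schema
kwargs_schema = {}                              # stands in for self.kwargs_schema
inference_schema = (
    (*args_schema, "<float descriptor>", "<int64 descriptor>"),  # cf. the two BatchTensorDescriptor(...) entries
    kwargs_schema,
)

parent_info = {"forward_schema": args_schema, "outputs_schema": ("<descriptor>",)}
info = dict(parent_info, inference_schema=inference_schema)  # copy and extend; parent_info stays untouched

assert "inference_schema" not in parent_info
assert info["inference_schema"] == inference_schema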