@@ -1,17 +1,15 @@
 from __future__ import annotations
 
-import contextlib
 import logging
-import random
 from typing import Optional, Union
 
 import torch
 from hivemind import DHT, P2P, get_logger, use_hivemind_log_handler
 from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
-from hivemind.moe.expert_uid import ExpertInfo
 from torch import nn
 
 import src
+from src.client.inference_session import RemoteSequentialInferenceSession
 from src.client.remote_block import RemoteTransformerBlock
 from src.client.sequence_manager import RemoteSequenceManager
 from src.data_structures import UID_DELIMITER
@@ -30,49 +28,41 @@ class RemoteSequential(nn.Module):
         self,
         config: src.DistributedBloomConfig,
         dht: DHT,
-        prefix: str,
-        max_retries: int = 3,
+        dht_prefix: Optional[str] = None,
         p2p: Optional[P2P] = None,
         sequence_manager: Optional[RemoteSequenceManager] = None,
     ):
         logger.warning(f"{self.__class__.__name__} is in active development; expect adventures")
-        if prefix.endswith(UID_DELIMITER):
-            logger.warning(
-                f"dht_prefix {prefix} already ends with '{UID_DELIMITER}'."
-                f"This will cause {self.__class__.__name__} to look for modules under "
-                f"{prefix}{UID_DELIMITER}*. Please make sure this is what you intended."
-            )
-
         super().__init__()
         self.config = config
         self.dht = dht
-        self.prefix = prefix
-        self.max_retries = max_retries
+        self.dht_prefix = dht_prefix or config.dht_prefix
         self.p2p = RemoteExpertWorker.run_coroutine(dht.replicate_p2p()) if p2p is None else p2p
 
-        block_uids = [f"{prefix}{UID_DELIMITER}{i}" for i in range(config.n_layer)]
+        num_blocks = self.config.n_layer if sequence_manager is None else len(sequence_manager)
+        block_uids = [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(num_blocks)]
         if sequence_manager is None:
             logger.debug(f"Creating new sequence manager for block uids: {block_uids}")
-            self.sequence_manager = RemoteSequenceManager(dht, block_uids)
+            self.sequence_manager = RemoteSequenceManager(dht, block_uids, self.p2p)
             self.is_subsequence = False
         else:
+            logger.debug(f"Reusing sequence manager with {len(sequence_manager)} modules")
+            self.sequence_manager = sequence_manager
             assert isinstance(sequence_manager.block_uids, list)
-            logger.debug(f"Reusing sequence manager with {len(self.sequence_manager)}")
             self.is_subsequence = self.sequence_manager.block_uids == block_uids
 
     def forward(self, inputs: torch.Tensor):
         assert isinstance(inputs, torch.Tensor) and inputs.ndim == 3 and inputs.shape[-1] == self.config.n_embed
-        for block_index in range(self.config.n_layer):
-            for retry_index in range(self.max_retries):
+        for block in iter(self):
+            for retry_index in range(self.sequence_manager.max_retries):
                 try:
-                    block = self[block_index]
                     (outputs,) = block(inputs)
                     assert isinstance(outputs, torch.Tensor)
                     assert outputs.shape == inputs.shape, f"Expected {block} output {inputs.shape}, got {outputs.shape}"
                     inputs = outputs
                     break
                 except Exception as e:
-                    if retry_index == self.max_retries - 1:
+                    if retry_index == self.sequence_manager.max_retries - 1:
                         raise e
                     else:
-                        logging.debug(f"Caught {e} when running forward for block {block_index}", exc_info=True)
+                        logging.debug(f"Caught {e} when running forward for block {block}", exc_info=True)
@@ -81,21 +71,20 @@ class RemoteSequential(nn.Module):
     def __getitem__(self, ix: Union[int, slice]) -> Union[RemoteTransformerBlock, RemoteSequential]:
         assert isinstance(ix, (int, slice))
         if isinstance(ix, int):
-            assert 0 <= ix < self.config.n_layer
+            assert 0 <= ix < len(self)
             (module,) = _create_remote_modules_from_infos([self.sequence_manager.block_infos[ix]], self.p2p)
             return module
         else:
             return RemoteSequential(
                 self.config,
                 self.dht,
-                prefix=self.prefix,
-                max_retries=self.max_retries,
+                dht_prefix=self.dht_prefix,
                 p2p=self.p2p,
                 sequence_manager=self.sequence_manager[ix],
             )
 
     def __iter__(self):
-        for block_index in range(self.config.n_layer):
+        for block_index in range(len(self)):
             yield self[block_index]
 
     def __len__(self):
@@ -105,56 +94,5 @@ class RemoteSequential(nn.Module):
         self.sequence_manager.update_()
         return RemoteSequentialInferenceSession(self.sequence_manager, self.p2p)
 
-
-class RemoteSequentialInferenceSession:
-    """An interface to a multi-step *inference* session for a sequence of remote transformer blocks"""
-
-    def __init__(self, remote_sequence_info: RemoteSequenceManager, p2p: P2P):
-        self.remote_sequence_info = remote_sequence_info
-        self.p2p = p2p
-        self.closed = False
-        self.stack = contextlib.ExitStack()
-        self.active_sessions = []
-
-    def __enter__(self):
-        assert not self.closed
-        self.stack.__enter__()
-        # TODO(yozh) replace this code with a fault-tolerant chain that can be reconstructed if some peers fail
-        current_block = 0
-        while current_block != len(self.remote_sequence_info):
-            candidate_spans = self.remote_sequence_info.spans_containing_block[current_block]
-            chosen_span = random.choice(candidate_spans)  # TODO this is a temporary code
-            assert chosen_span.start <= current_block < chosen_span.end
-
-            # TODO begin throwaway prototype code
-            remote = RemoteTransformerBlock(self.remote_sequence_info.block_infos[current_block], self.p2p)
-            _ = remote.info  # TODO fix
-            span_uids = self.remote_sequence_info.block_uids[current_block : chosen_span.end]
-            remote._info = ExpertInfo(" ".join(span_uids), chosen_span.peer_id)
-            self.active_sessions.append(remote.inference_session())
-            self.stack.enter_context(self.active_sessions[-1])
-            current_block = chosen_span.end
-            # TODO end throwaway prototype code
-
-        return self
-
-    def step(self, inputs: torch.Tensor):
-        assert not self.closed
-        for session in self.active_sessions:
-            outputs = session.step(inputs)
-            assert outputs.shape == inputs.shape, f"expected {inputs.shape}, got {outputs.shape}"
-            inputs = outputs
-        return inputs
-
-    def close(self, *exc_details):
-        """Finish a given inference session, close the underlying connection"""
-        if not self.closed:
-            self.stack.__exit__(*exc_details or (None, None, None))
-            self.active_sessions.clear()
-            self.closed = True
-
-    def __exit__(self, *exc_details):
-        self.close(*exc_details)
-
-    def __del__(self):
-        self.close()
+    def extra_repr(self) -> str:
+        return f"modules={self.sequence_manager.block_uids[0]}..{self.sequence_manager.block_uids[-1]}"
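
A minimal usage sketch of the refactored interface follows (not part of the patch). It assumes a running server swarm behind a reachable hivemind DHT, that this file lives at src/client/remote_sequential.py, and that the RemoteSequentialInferenceSession moved to src.client.inference_session keeps the step() API of the class removed above; the model name and initial peer address are placeholders.

# Usage sketch under the assumptions stated above; names marked "placeholder"
# or "assumed" are illustrative, not taken from this diff.
import hivemind
import torch

import src
from src.client.remote_sequential import RemoteSequential  # assumed module path

config = src.DistributedBloomConfig.from_pretrained("bigscience/test-bloomd-6b3")  # placeholder model name
dht = hivemind.DHT(
    initial_peers=["/ip4/127.0.0.1/tcp/31337/p2p/Qm..."],  # placeholder multiaddr of a swarm peer
    client_mode=True,
    start=True,
)

# dht_prefix now defaults to config.dht_prefix, so the old required `prefix` argument goes away
remote_blocks = RemoteSequential(config, dht)

# forward() expects [batch, seq_len, hidden] activations and retries each block
# up to sequence_manager.max_retries times before raising
hidden = torch.randn(1, 8, config.n_embed)
hidden = remote_blocks(hidden)

# slicing returns another RemoteSequential that reuses the same sequence manager
first_half = remote_blocks[: len(remote_blocks) // 2]

# multi-step inference is now provided by src.client.inference_session;
# step() is assumed to keep the interface of the removed implementation
with remote_blocks.inference_session() as session:
    for _ in range(3):
        next_hidden = session.step(torch.randn(1, 1, config.n_embed))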