# inference_session.py

from __future__ import annotations

import asyncio
import contextlib
from typing import AsyncIterator, List, Optional

import torch
from hivemind import (
    P2P,
    MSGPackSerializer,
    anext,
    deserialize_torch_tensor,
    get_logger,
    nested_flatten,
    serialize_torch_tensor,
    use_hivemind_log_handler,
)
from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
from hivemind.p2p import StubBase
from hivemind.proto import runtime_pb2

from src.client.sequence_manager import RemoteSequenceManager
from src.data_structures import CHAIN_DELIMITER, ModuleUID, RemoteSpanInfo, RPCInfo
from src.server.handler import TransformerConnectionHandler
from src.utils.misc import DUMMY, is_dummy

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)


class RemoteTransformerBlockInferenceSession:
    """
    An interface to a single multi-step *inference* session for a specific remote module on a specific server

    :note: this inference session is *not* fault-tolerant out of the box
    """

    def __init__(
        self,
        uid: ModuleUID,
        rpc_info: RPCInfo,
        inputs_queue: asyncio.Queue,
        outputs_aiter: AsyncIterator,
        *,
        max_length: int,
    ):
        self.uid, self.rpc_info = uid, rpc_info
        self.num_blocks = uid.count(CHAIN_DELIMITER) + 1
        # warning: this code manages async objects that are only usable inside RemoteExpertWorker's background thread;
        # using them in any other event loop may cause side effects including headaches, diarrhea, and loss of sleep
        self._inputs_queue: asyncio.Queue[runtime_pb2.ExpertRequest] = inputs_queue
        self._outputs_stream: AsyncIterator[runtime_pb2.ExpertResponse] = outputs_aiter
        self._serialized_metadata = MSGPackSerializer.dumps(dict(max_length=max_length))
        self.stepped = False
        self.closed = False

    @classmethod
    async def _create(
        cls, stub: StubBase, uid: ModuleUID, rpc_info: RPCInfo, timeout: Optional[float] = None, **metadata
    ) -> RemoteTransformerBlockInferenceSession:
        """Create a new session for a given remote module. This code is meant to be run inside RemoteExpertWorker"""
        inputs_queue = asyncio.Queue()
        outputs_stream = await stub.rpc_inference(cls._read_inputs_from_queue(inputs_queue, timeout), timeout=timeout)
        return cls(uid, rpc_info, inputs_queue, outputs_stream, **metadata)

    @staticmethod
    async def _read_inputs_from_queue(queue: asyncio.Queue, timeout: Optional[float]) -> AsyncIterator:
        while True:
            next_input_message = await asyncio.wait_for(queue.get(), timeout)
            yield next_input_message
            if not next_input_message.uid and not next_input_message.tensors:
                break  # this message means "done sending"

    def step(
        self,
        new_hidden_states: torch.Tensor,
        prompts: Optional[torch.Tensor] = None,
        hypo_ids: Optional[torch.Tensor] = None,
    ):
        """
        Inference step: send a chunk of input tensors and receive a chunk of outputs

        :param prompts: optional DEEP prompts, added to a prefix of each layer's outputs;
          if specified, deep prompts should have shape [num_layers, batch_size, prefix_len, hid_size]
        """
        if self.closed:
            raise Exception("Session is closed, cannot perform step")
        if prompts is None or is_dummy(prompts):
            prompts = DUMMY
        else:
            assert prompts.ndim == 4, "deep prompts should have shape [num_layers, batch_size, prefix_len, hid_size]"
            assert prompts.shape[0] == self.num_blocks
            assert prompts.shape[1] in (new_hidden_states.shape[0], 1)
            assert prompts.shape[2] <= new_hidden_states.shape[1]
            assert prompts.shape[3] == new_hidden_states.shape[2]
        if hypo_ids is None or is_dummy(hypo_ids):
            hypo_ids = DUMMY
        else:
            assert len(hypo_ids) == len(new_hidden_states)
            assert hypo_ids.dtype == torch.int64

        # serialize inputs and put them into the queue
        inputs = (new_hidden_states, prompts, hypo_ids)
        outputs_serialized = RemoteExpertWorker.run_coroutine(
            self._step(
                runtime_pb2.ExpertRequest(
                    uid=self.uid,
                    tensors=[
                        serialize_torch_tensor(tensor.to(proto.dtype), proto.compression)
                        for tensor, proto in zip(inputs, nested_flatten(self.rpc_info["inference_schema"]))
                    ],
                    metadata=self._serialized_metadata if not self.stepped else None,
                )
            )
        )
        outputs = list(map(deserialize_torch_tensor, outputs_serialized.tensors))
        assert outputs[0].shape == inputs[0].shape, f"expected outputs[0] to be hidden states but got {outputs[0]}"
        return outputs[0]

    async def _step(self, inputs_serialized: runtime_pb2.ExpertRequest) -> runtime_pb2.ExpertResponse:
        """Inference step on serialized data. This code is meant to be run inside RemoteExpertWorker"""
        await self._inputs_queue.put(inputs_serialized)
        self.stepped = True
        return await anext(self._outputs_stream)

    def close(self):
        """Finish a given inference session, close the underlying connection"""
        if self._outputs_stream is None:
            return  # already closed
        RemoteExpertWorker.run_coroutine(self._aclose_stream())
        self._outputs_stream = self._inputs_queue = None
        self.closed = True

    async def _aclose_stream(self):
        """Close the inference session. This code is meant to be run inside RemoteExpertWorker"""
        if self._outputs_stream is None:
            return  # already closed
        if self.stepped:
            await self._inputs_queue.put(runtime_pb2.ExpertRequest())  # empty request will trigger end of session
            try:
                await anext(self._outputs_stream)
            except StopAsyncIteration:
                pass

    def __del__(self):
        self.close()

    def __enter__(self):
        assert not self.closed
        return self

    def __exit__(self, *exc_details):
        self.close()
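
# Minimal usage sketch for a single-block session (assumes a ready `stub`, module `uid`, and `rpc_info`;
# the max_length value is illustrative). The session must be created via RemoteExpertWorker so that its
# input queue and output stream live in that worker's event loop:
#
#     session = RemoteExpertWorker.run_coroutine(
#         RemoteTransformerBlockInferenceSession._create(stub, uid, rpc_info, max_length=1024)
#     )
#     with session:
#         hidden_states = session.step(hidden_states)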


class RemoteSequentialInferenceSession:
    """
    An interface to a multi-step *inference* session for a sequence of remote transformer blocks
    """

    def __init__(self, sequence_manager: RemoteSequenceManager, p2p: P2P, timeout: Optional[float] = None, **metadata):
        self.sequence_manager = sequence_manager
        self.p2p = p2p
        self.closed = False
        self.chosen_spans: List[RemoteSpanInfo] = []
        self.stack = contextlib.ExitStack()
        self.inference_sessions: List[RemoteTransformerBlockInferenceSession] = []
        self.metadata = metadata
        self.timeout = timeout

    def __enter__(self):
        assert not self.closed and not self.chosen_spans
        self.stack.__enter__()
        # TODO(yozh) replace this code with a fault-tolerant chain that can be reconstructed if some peers fail
        self.chosen_spans.extend(self.sequence_manager.make_sequence())

        for chosen_span in self.chosen_spans:
            stub = TransformerConnectionHandler.get_stub(self.p2p, chosen_span.peer_id)
            span_uids: str = CHAIN_DELIMITER.join(self.sequence_manager.block_uids[chosen_span.start : chosen_span.end])
            inference_session = RemoteExpertWorker.run_coroutine(
                RemoteTransformerBlockInferenceSession._create(
                    stub, span_uids, rpc_info=self.sequence_manager.rpc_info, timeout=self.timeout, **self.metadata
                )
            )
            self.inference_sessions.append(inference_session)
            self.stack.enter_context(inference_session)

        return self

    def step(self, inputs: torch.Tensor, prompts: Optional[torch.Tensor] = None, **kwargs):
        assert not self.closed
        if torch.is_grad_enabled():
            logger.warning("Running inference session with grad enabled. Gradients will *not* be propagated correctly.")
        if prompts is None or is_dummy(prompts):
            prompts = DUMMY
        else:
            assert prompts.ndim == 4 and prompts.shape[0] == len(self.sequence_manager)
        for session, chosen_span in zip(self.inference_sessions, self.chosen_spans):
            # each server receives only the slice of deep prompts that covers its own span of blocks
            outputs = session.step(inputs, prompts[chosen_span.start : chosen_span.end], **kwargs)
            assert outputs.shape == inputs.shape, f"expected {inputs.shape}, got {outputs.shape}"
            inputs = outputs
        return inputs

    def close(self, *exc_details):
        """Finish a given inference session, close the underlying connection"""
        if not self.closed:
            self.stack.__exit__(*exc_details or (None, None, None))
            self.inference_sessions.clear()
            self.closed = True

    def __exit__(self, *exc_details):
        self.close(*exc_details)

    def __del__(self):
        self.close()
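

# Minimal end-to-end sketch (assumes an already-initialized RemoteSequenceManager `sequence_manager`
# and hivemind P2P instance `p2p`; variable names and the max_length value are illustrative):
#
#     with RemoteSequentialInferenceSession(sequence_manager, p2p, max_length=1024) as session:
#         for _ in range(num_new_tokens):
#             # hidden_states: [batch_size, n_input_tokens, hidden_size], typically one token per step
#             hidden_states = session.step(hidden_states)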