expert.py

from typing import Any, Dict, Optional, Tuple

import torch
import torch.nn as nn
from torch.autograd.function import once_differentiable

from hivemind.compression import deserialize_torch_tensor, serialize_torch_tensor
from hivemind.proto import runtime_pb2, runtime_pb2_grpc as runtime_grpc
from hivemind.utils import Endpoint, MSGPackSerializer, nested_compare, nested_flatten, nested_pack
from hivemind.utils.grpc import ChannelCache

DUMMY = torch.empty(0, requires_grad=True)  # dummy tensor that triggers autograd in RemoteExpert


def _get_expert_stub(endpoint: Endpoint, *extra_options: Tuple[str, Any]):
    """Create a gRPC stub to access remote expert or use previously created stub from a process-wide cache"""
    channel_options = (("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1)) + extra_options
    return ChannelCache.get_stub(endpoint, runtime_grpc.ConnectionHandlerStub, aio=False, options=channel_options)


class RemoteExpert(nn.Module):
    """
    A simple module that runs forward/backward of an expert hosted on a remote machine.
    Works seamlessly with pytorch autograd. (this is essentially a simple RPC function)

    Warning: RemoteExpert currently assumes that you provide it with correct input shapes.
    Sending wrong input shapes can cause RemoteExpert to freeze indefinitely due to error in runtime.

    :param uid: unique expert identifier
    :param endpoint: network endpoint of a server that services that expert, e.g. "201.123.321.99:1337" or "[::]:8080"
    """

    def __init__(self, uid, endpoint: Endpoint):
        super().__init__()
        self.uid, self.endpoint = uid, endpoint
        self._info = None

    @property
    def stub(self):
        return _get_expert_stub(self.endpoint)

    def forward(self, *args, **kwargs):
        """Call RemoteExpert for the specified inputs and return its output(s). Compatible with pytorch.autograd."""
        assert len(kwargs) == len(self.info["keyword_names"]), f"Keyword args should be {self.info['keyword_names']}"
        kwargs = {key: kwargs[key] for key in self.info["keyword_names"]}
        # Note: we put keyword arguments in the same order as on a server to prevent f(a=1, b=2) != f(b=2, a=1) errors
        forward_inputs = (args, kwargs)

        if not nested_compare(forward_inputs, self.info["forward_schema"]):
            raise TypeError("Inputs do not match expert input schema. Did you pass the right number of parameters?")

        flat_outputs = _RemoteModuleCall.apply(DUMMY, self.uid, self.stub, self.info, *nested_flatten(forward_inputs))
        # Note: we send DUMMY to prevent torch from excluding expert from backward if no other inputs require grad
        return nested_pack(flat_outputs, structure=self.info["outputs_schema"])

    @property
    def info(self):
        if self._info is None:
            outputs = self.stub.info(runtime_pb2.ExpertUID(uid=self.uid))
            self._info = MSGPackSerializer.loads(outputs.serialized_info)
        return self._info

    def extra_repr(self):
        return f"uid={self.uid}, endpoint={self.endpoint}"
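

# Illustrative usage sketch: the uid, endpoint and input shape below are hypothetical and must
# correspond to an expert that is actually being served by a hivemind server:
#
#     expert = RemoteExpert(uid="expert.0", endpoint="127.0.0.1:8080")
#     outputs = expert(torch.randn(2, 512))  # forward pass runs remotely; outputs follow outputs_schema
#     # gradients from a later .backward() call are routed to the server via _RemoteModuleCall.backward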


class _RemoteModuleCall(torch.autograd.Function):
    """Internal autograd-friendly call of a remote module. For applications, use RemoteExpert instead."""

    @staticmethod
    def forward(
        ctx,
        dummy: torch.Tensor,
        uid: str,
        stub: runtime_grpc.ConnectionHandlerStub,
        info: Dict[str, Any],
        *inputs: torch.Tensor,
    ) -> Tuple[torch.Tensor, ...]:
        # Note: *inputs are flattened input tensors that follow the expert's info['input_schema']
        # detach to avoid pickling the computation graph
        inputs = tuple(tensor.cpu().detach() for tensor in inputs)
        ctx.uid, ctx.stub, ctx.info = uid, stub, info
        ctx.save_for_backward(*inputs)

        serialized_tensors = [
            serialize_torch_tensor(inp, proto.compression)
            for inp, proto in zip(inputs, nested_flatten(info["forward_schema"]))
        ]

        outputs = stub.forward(runtime_pb2.ExpertRequest(uid=ctx.uid, tensors=serialized_tensors))

        deserialized_outputs = [deserialize_torch_tensor(tensor) for tensor in outputs.tensors]
        return tuple(deserialized_outputs)

    @staticmethod
    @once_differentiable
    def backward(ctx, *grad_outputs) -> Tuple[Optional[torch.Tensor], ...]:
        grad_outputs_cpu = tuple(tensor.cpu() for tensor in grad_outputs)
        inputs_and_grad_outputs = tuple(nested_flatten((ctx.saved_tensors, grad_outputs_cpu)))
        backward_schema = tuple(nested_flatten((ctx.info["forward_schema"], ctx.info["outputs_schema"])))

        serialized_tensors = [
            serialize_torch_tensor(tensor, proto.compression)
            for tensor, proto in zip(inputs_and_grad_outputs, backward_schema)
        ]

        grad_inputs = ctx.stub.backward(runtime_pb2.ExpertRequest(uid=ctx.uid, tensors=serialized_tensors))

        deserialized_grad_inputs = [deserialize_torch_tensor(tensor) for tensor in grad_inputs.tensors]
        # backward must return one value per forward() argument: DUMMY for the `dummy` tensor,
        # None for the non-tensor arguments (uid, stub, info), then one gradient per input tensor
        return (DUMMY, None, None, None, *deserialized_grad_inputs)
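

# RemoteExpert.info is fetched lazily from the server and cached; the code above relies on it
# containing at least "forward_schema", "outputs_schema" and "keyword_names". A rough sketch of
# the expected layout (the descriptor objects expose a .compression field; exact types and
# values are defined by the serving side, the ones shown here are hypothetical):
#
#     info = {
#         "forward_schema": (args_descriptors, kwargs_descriptors),  # matches (args, kwargs) in forward()
#         "outputs_schema": output_descriptors,
#         "keyword_names": ("mask", ...),
#     }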