Ver código fonte

wip: parallel fault-tolerant moe backward pass

justheuristic 5 anos atrás
pai
commit
2b2ddf8280
1 arquivo alterado com 1 adição e 3 exclusões
  1. 1 3
      tesseract/client/moe.py

+ 1 - 3
tesseract/client/moe.py

@@ -177,8 +177,6 @@ class _RemoteMoECall(torch.autograd.Function):
     This function that can recover from individual failures during forward and/or backward passes.
     For user-friendly version of this function, use RemoteMixtureOfExperts module.
     """
-    MIN_TOTAL_WEIGHT = 1e-3
-
     @classmethod
     def forward(cls, ctx, expert_logits: torch.Tensor, experts: List[RemoteExpert],
                 *flat_inputs: torch.Tensor, input_schema, k_min: int, timeout_after_k_min: float, backward_k_min: int,
@@ -229,7 +227,7 @@ class _RemoteMoECall(torch.autograd.Function):
             *survived_grad_inputs))
 
         grad_logits = None  # TODO
-        return (grad_logits, None, *flat_grad_inputs, None, None, None, None, None, None)
+        return grad_logits, None, *flat_grad_inputs, None, None, None, None, None, None
 
     @staticmethod
     def _run_expert_forward(expert: RemoteExpert, *args: torch.Tensor, **kwargs: torch.Tensor):