move to notes

justheuristic, 5 years ago
parent commit 8931c56f73
1 file changed, 0 insertions(+), 1 deletion(-)

tesseract/client/moe.py (+0, -1)

@@ -212,7 +212,6 @@ class _RemoteMoECall(torch.autograd.Function):
     @once_differentiable
     def backward(cls, ctx, *grad_outputs_flat: torch.Tensor) -> Tuple[Optional[torch.Tensor], ...]:
         """ Like normal backward, but we ignore any experts that failed during backward pass """
-        #TODO add dummy tensor or something else that ensures that backward pass is not omitted even if inputs do not require grad
         expert_logits, alive_ix, alive_expert_probas = ctx.saved_tensors
         alive_contexts, k_min, timeout = ctx._alive_contexts, ctx._backward_k_min, ctx._backward_timeout
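For context, the TODO removed by this commit refers to a well-known PyTorch workaround: if none of the real inputs to a torch.autograd.Function require grad, autograd never records the node, so backward() is silently skipped. Passing an extra "dummy" tensor with requires_grad=True forces the node into the graph. Below is a minimal, self-contained sketch of that trick; the _CallWithDummy class and the doubling operation are illustrative only and not part of this repository:

    import torch


    class _CallWithDummy(torch.autograd.Function):
        """Hypothetical illustration of the dummy-tensor trick from the removed TODO."""

        @staticmethod
        def forward(ctx, dummy: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
            # `dummy` carries no data; its only job is to require grad so that
            # autograd records this node even when `x` does not require grad.
            return x * 2.0

        @staticmethod
        def backward(ctx, grad_output: torch.Tensor):
            # Invoked even if `x.requires_grad` is False, because `dummy` requires grad.
            return None, grad_output * 2.0


    x = torch.randn(3)                          # requires_grad=False
    dummy = torch.empty(0, requires_grad=True)  # trainable placeholder, no data
    y = _CallWithDummy.apply(dummy, x)
    y.sum().backward()                          # backward() above still runs

Here backward() fires even though x itself never required grad; the gradient returned for dummy is None, so the placeholder is never actually updated.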