Browse Source

add docs in sphinx format: tesseract.runtime

justheuristic 5 years ago
parent
commit
795bae239c

+ 2 - 2
docs/modules/client.rst

@@ -1,5 +1,5 @@
-tesseract.client
-================
+``tesseract.client``
+====================
 
 .. automodule:: tesseract.client
 

+ 1 - 5
docs/modules/server.rst

@@ -1,5 +1,5 @@
 ``tesseract.server & tesseract.runtime``
-================
+========================================
 
 .. automodule:: tesseract.server
 
@@ -9,10 +9,6 @@
    :members:
    :member-order: bysource
 
-
-tesseract.runtime
-=================
-
 .. currentmodule:: tesseract.runtime
 
 .. autoclass:: TesseractRuntime

+ 4 - 3
tesseract/runtime/expert_backend.py

@@ -11,9 +11,10 @@ class ExpertBackend(nn.Module):
     """
     ExpertBackend is a wrapper around torch module that allows it to run tasks asynchronously with TesseractRuntime
     By default, ExpertBackend handles three types of requests:
-     * forward - receive inputs and compute outputs. Concurrent requests will be batched for better GPU utilization.
-     * backward - receive gradients w.r.t. outputs, compute gradients w.r.t. inputs and **update expert**. Also batched.
-     * get_info - return expert metadata. Not batched.
+
+     - forward - receive inputs and compute outputs. Concurrent requests will be batched for better GPU utilization.
+     - backward - receive gradients w.r.t. outputs, compute gradients w.r.t. inputs and **update expert**. Also batched.
+     - get_info - return expert metadata. Not batched.
 
     :param expert: nn.Module to be wrapped into a backend. Arbitrary pytorch module with a few limitations:
 

+ 2 - 1
tesseract/runtime/task_pool.py

@@ -59,7 +59,7 @@ class TaskPool(TaskPoolBase):
     to process these batches and dispatches results back to request sources. Operates as a background process.
 
     :param process_func: function to be applied to every formed batch; called by TesseractRuntime
-        Note: process_func should accept only *args Tensors and return a list of output Tensors
+        Note that process_func should accept only \*args Tensors and return a flat tuple of Tensors
     :param max_batch_size: process at most this many inputs in a batch (task contains have one or several inputs)
     :param min_batch_size: process at least this many inputs in a batch, otherwise wait for more
     :param timeout: wait for a subsequent task for at most this many seconds
@@ -90,6 +90,7 @@ class TaskPool(TaskPoolBase):
             self.start()
 
     def submit_task(self, *args: torch.Tensor) -> Future:
+        """ Add task to this pool's queue, return Future for its output """
         future1, future2 = SharedFuture.make_pair()
         self.tasks.put(Task(future1, args))
         self.undispatched_task_timestamps.put(time.time())