config.yml

listen_on: 0.0.0.0:*  # 'localhost' for local connections only, '0.0.0.0' for IPv4, '::' for IPv6
num_experts: 1  # run this many identical experts
expert_cls: ffn  # expert type from test_utils.layers, e.g. 'ffn', 'transformer', 'det_dropout' or 'nop'
hidden_dim: 1024  # main dimension for expert_cls
#num_handlers:  # server will use this many processes to handle incoming requests
expert_prefix: expert  # all expert uids will be {expert_prefix}.{index}
expert_offset: 0  # expert uids will use indices in range(expert_offset, expert_offset + num_experts)
max_batch_size: 16384  # total number of examples in the same batch will not exceed this value
#device:  # all experts will use this device in torch notation; default: cuda if available, else cpu
no_optimizer: True  # if specified, all optimizers use learning rate = 0
no_dht: True  # if specified, the server will not be attached to a DHT
initial_peers: "[]"  # a list of peers that will introduce this node to the DHT, e.g. [("1.2.3.4", 1337), ("127.0.0.1", 4321)]
#dht_port:  # DHT node will listen on this port
#root_port:  # if this server has no peers, it will create a virtual DHT node on this port; you can then use that node as an initial peer
increase_file_limit: True  # on *nix, this raises the open-file limit so the server can spawn more processes before hitting "Too many open files"; use at your own risk
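
A config like this is typically consumed by a Python launcher. Below is a minimal sketch of how it might be loaded; PyYAML, the `ast.literal_eval` step for `initial_peers`, and the `start_server` call are illustrative assumptions, not part of this file or a documented API.

```python
# Sketch: load config.yml and pass its keys to a server entry point.
import ast
import yaml

with open("config.yml") as f:
    config = yaml.safe_load(f)  # top-level keys become plain Python values

# initial_peers is stored as a string literal; parse it into a real list
# of (host, port) tuples before use.
config["initial_peers"] = ast.literal_eval(config["initial_peers"])

# `start_server` is hypothetical -- substitute the project's actual launcher:
# start_server(**config)
print(config["listen_on"], config["num_experts"], config["initial_peers"])
```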