run_server.py

from functools import partial
from pathlib import Path

import configargparse
import torch

from hivemind.moe.server import Server
from hivemind.moe.server.layers import schedule_name_to_scheduler
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.utils.limits import increase_file_limit
from hivemind.utils.logging import get_logger, use_hivemind_log_handler

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__name__)


def main():
    # fmt:off
    parser = configargparse.ArgParser(default_config_files=["config.yml"])
    parser.add('-c', '--config', required=False, is_config_file=True, help='config file path')

    parser.add_argument('--listen_on', type=str, default='0.0.0.0:*', required=False,
                        help="'localhost' for local connections only, '0.0.0.0' for ipv4, '[::]' for ipv6")
    parser.add_argument('--num_experts', type=int, default=None, required=False, help="The number of experts to serve")
    parser.add_argument('--expert_pattern', type=str, default=None, required=False,
                        help='all expert uids will follow this pattern, e.g. "myexpert.[0:256].[0:1024]" will'
                             ' sample random expert uids between myexpert.0.0 and myexpert.255.1023. Use either'
                             ' num_experts and this or expert_uids')
    parser.add_argument('--expert_uids', type=str, nargs="*", default=None, required=False,
                        help="specify the exact list of expert uids to create. Use either this or num_experts"
                             " and expert_pattern, not both")
    parser.add_argument('--expert_cls', type=str, default='ffn', required=False,
                        help="expert type from test_utils.layers, e.g. 'ffn', 'transformer', 'det_dropout' or 'nop'.")
    parser.add_argument('--hidden_dim', type=int, default=1024, required=False, help='main dimension for expert_cls')

    parser.add_argument('--num_handlers', type=int, default=None, required=False,
                        help='server will use this many processes to handle incoming requests')
    parser.add_argument('--min_batch_size', type=int, default=1,
                        help='Minimum required batch size for all expert operations')
    parser.add_argument('--max_batch_size', type=int, default=16384,
                        help='The total number of examples in the same batch will not exceed this value')
    parser.add_argument('--use_averaging', action='store_true',
                        help='Whether to use decentralized parameter and gradient averaging '
                             'by wrapping the optimizer with CollaborativeOptimizer')
    parser.add_argument('--averaging_target_batch_size', type=int, required=False,
                        help='Number of examples to accumulate across all peers before averaging')
    parser.add_argument('--averaging_target_group_size', type=int, required=False,
                        help='Target group size for decentralized averaging')
    parser.add_argument('--device', type=str, default=None, required=False,
                        help='all experts will use this device in torch notation; default: cuda if available else cpu')

    parser.add_argument('--fp16', action='store_true', help='Use mixed precision during forward and backward steps')
    parser.add_argument('--optimizer', type=str, default='adam', required=False, help='adam, sgd or none')
    parser.add_argument('--scheduler', type=str, choices=schedule_name_to_scheduler.keys(), default='none',
                        help='LR scheduler type to use')
    parser.add_argument('--num_warmup_steps', type=int, required=False,
                        help='The number of warmup steps for LR schedule')
    parser.add_argument('--num_total_steps', type=int, required=False, help='The total number of steps for LR schedule')
    parser.add_argument('--clip_grad_norm', type=float, required=False, help='Maximum gradient norm used for clipping')

    parser.add_argument('--no_dht', action='store_true', help='if specified, the server will not be attached to a dht')
    parser.add_argument('--dht_port', type=int)
    parser.add_argument('--dht_listen_on', type=str)
    parser.add_argument('--initial_peers', type=str, nargs='*', required=False, default=[],
                        help='multiaddrs of one or more active DHT peers (if you want to join an existing DHT)')
    parser.add_argument('--increase_file_limit', action='store_true',
                        help='On *nix, this will increase the max number of processes '
                             'a server can spawn before hitting "Too many open files"; Use at your own risk.')
    parser.add_argument('--compression', type=str, default='NONE', required=False, help='Tensor compression for gRPC')
    parser.add_argument('--averaging_compression', type=str, default='FLOAT16', required=False,
                        help='Averaging compression')

    parser.add_argument('--checkpoint_dir', type=Path, required=False, help='Directory to store expert checkpoints')
    parser.add_argument('--stats_report_interval', type=int, required=False,
                        help='Interval between two reports of batch processing performance statistics')
    parser.add_argument('--custom_module_path', type=str, required=False,
                        help='Path of a file with custom nn.modules, wrapped into special decorator')
    parser.add_argument('--identity_path', type=str, required=False,
                        help='Path of a libp2p identity file')

    parser.add_argument('--averaging_min_refresh_period', type=float, default=1)
    parser.add_argument('--averaging_max_refresh_period', type=float, default=60)
    parser.add_argument('--averaging_default_refresh_period', type=float, default=10)
    parser.add_argument('--averaging_expiration', type=float, default=10)
    parser.add_argument('--metadata_expiration', type=float, default=120)
    parser.add_argument('--averaging_timeout', type=float, default=30)
    parser.add_argument('--reuse_grad_buffers', type=bool, default=True)
    # fmt:on
    args = vars(parser.parse_args())
    args.pop("config", None)

    # resolve the optimizer class from its string name
    optimizer = args.pop("optimizer")
    if optimizer == "adam":
        optim_cls = torch.optim.Adam
    elif optimizer == "sgd":
        optim_cls = partial(torch.optim.SGD, lr=0.01)
    elif optimizer == "none":
        optim_cls = None
    else:
        raise ValueError("optim_cls must be adam, sgd or none")

    if args.pop("increase_file_limit"):
        increase_file_limit()

    compression_type = args.pop("compression")
    compression = getattr(CompressionType, compression_type)

    server = Server.create(**args, optim_cls=optim_cls, start=True, compression=compression)

    try:
        server.join()
    except KeyboardInterrupt:
        logger.info("Caught KeyboardInterrupt, shutting down")
    finally:
        server.shutdown()


if __name__ == "__main__":
    main()
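
# Usage sketch (not part of the original script; the flag names exist in the parser above,
# but the specific values and the "expert.[0:4].[0:4]" uid pattern are illustrative examples):
#
#   python run_server.py --expert_cls ffn --hidden_dim 1024 --num_experts 4 \
#       --expert_pattern "expert.[0:4].[0:4]" --optimizer adam --compression FLOAT16
#
# The same options can instead be placed in config.yml, which configargparse reads by default
# (or pass an alternative file via -c/--config), e.g.:
#
#   expert_cls: ffn
#   hidden_dim: 1024
#   num_experts: 4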