run_server.py

import os, sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))  # add path to src

import configargparse
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.utils.limits import increase_file_limit
from hivemind.utils.logging import get_logger, use_hivemind_log_handler

from src.server.server import Server

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__name__)
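
# Example invocation (all values below are illustrative, not project defaults):
#   python run_server.py --num_blocks 2 --host_maddrs /ip4/0.0.0.0/tcp/31337 \
#       --initial_peers /ip4/203.0.113.5/tcp/31337/p2p/<peer_id>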


def main():
    # fmt:off
    parser = configargparse.ArgParser(default_config_files=["config.yml"])
    parser.add('-c', '--config', required=False, is_config_file=True, help='config file path')

    parser.add_argument('--block_config', type=str, default='bigscience/bloom', help="name or path of model config")
    parser.add_argument('--num_blocks', type=int, default=1, help="The number of blocks to serve")
    parser.add_argument('--host_maddrs', type=str, nargs='+', default=['/ip4/0.0.0.0/tcp/0'], required=False,
                        help='Multiaddrs to listen for external connections from other p2p instances; '
                             'default: all IPv4 and TCP: /ip4/0.0.0.0/tcp/0')
    parser.add_argument('--announce_maddrs', type=str, nargs='+', default=None, required=False,
                        help='Visible multiaddrs the host announces for external connections from other p2p instances')
    parser.add_argument('--compression', type=str, default='NONE', required=False,
                        help='Tensor compression used for network communication')
    parser.add_argument('--num_handlers', type=int, default=None, required=False,
                        help='server will use this many processes to handle incoming requests')
    parser.add_argument('--min_batch_size', type=int, default=1,
                        help='Minimum required batch size for all expert operations')
    parser.add_argument('--max_batch_size', type=int, default=16384,
                        help='The total number of examples in the same batch will not exceed this value')
    parser.add_argument('--cache_size_bytes', type=int, default=None,
                        help='The size of memory cache for storing past attention keys/values between inference steps')
    parser.add_argument('--device', type=str, default=None, required=False,
                        help='all experts will use this device in torch notation; default: cuda if available else cpu')
    parser.add_argument('--update_period', type=float, required=False, default=30,
                        help='Server will report experts to DHT once in this many seconds')
    parser.add_argument('--expiration', type=float, required=False, default=None,
                        help='DHT entries will expire after this many seconds')
    parser.add_argument('--initial_peers', type=str, nargs='*', required=False, default=[],
                        help='multiaddrs of one or more active DHT peers (if you want to join an existing DHT)')
    parser.add_argument('--increase_file_limit', action='store_true',
                        help='On *nix, this will increase the max number of processes '
                             'a server can spawn before hitting "Too many open files"; Use at your own risk.')
    parser.add_argument('--stats_report_interval', type=int, required=False,
                        help='Interval between two reports of batch processing performance statistics')
    parser.add_argument('--custom_module_path', type=str, required=False,
                        help='Path of a file with custom nn.modules, wrapped into special decorator')
    # fmt:on
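
    # Note: configargparse also reads values from config.yml (or a file passed via -c/--config);
    # a hypothetical config.yml, with keys mirroring the long option names, might look like:
    #   num_blocks: 2
    #   device: cuda
    #   host_maddrs: [/ip4/0.0.0.0/tcp/31337]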
    args = vars(parser.parse_args())
    args.pop("config", None)

    if args.pop("increase_file_limit"):
        increase_file_limit()

    # --compression must name a member of hivemind's CompressionType enum (e.g. NONE or FLOAT16)
    compression_type = args.pop("compression")
    compression = getattr(CompressionType, compression_type)

    # the remaining CLI arguments map directly onto Server.create keyword arguments
    server = Server.create(**args, start=True, compression=compression)

    try:
        server.join()
    except KeyboardInterrupt:
        logger.info("Caught KeyboardInterrupt, shutting down")
    finally:
        server.shutdown()


if __name__ == "__main__":
    main()