# dht_utils.py
  1. """
  2. Utilities for declaring and retrieving active model layers using a shared DHT.
  3. """
  4. from __future__ import annotations
  5. import math
  6. from functools import partial
  7. from typing import Dict, List, Optional, Sequence, Union
  8. from hivemind.dht import DHT, DHTNode, DHTValue
  9. from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
  10. from hivemind.p2p import P2P, PeerID
  11. from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger, use_hivemind_log_handler
  12. import src
  13. from src.data_structures import CHAIN_DELIMITER, UID_DELIMITER, ModuleUID, RemoteModuleInfo, ServerInfo, ServerState
# Route hivemind's log records through the root logger so they follow the host
# application's logging configuration.
use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)
  16. def declare_active_modules(
  17. dht: DHT,
  18. uids: Sequence[ModuleUID],
  19. expiration_time: DHTExpiration,
  20. state: ServerState,
  21. throughput: float,
  22. wait: bool = True,
  23. ) -> Union[Dict[ModuleUID, bool], MPFuture[Dict[ModuleUID, bool]]]:
  24. """
  25. Declare that your node serves the specified modules; update timestamps if declared previously
  26. :param uids: a list of module ids to declare
  27. :param wait: if True, awaits for declaration to finish, otherwise runs in background
  28. :param throughput: specify your performance in terms of compute throughput
  29. :param expiration_time: declated modules will be visible for this many seconds
  30. :returns: if wait, returns store status for every key (True = store succeeded, False = store rejected)
  31. """
  32. if isinstance(uids, str):
  33. uids = [uids]
  34. if not isinstance(uids, list):
  35. uids = list(uids)
  36. for uid in uids:
  37. assert isinstance(uid, ModuleUID) and UID_DELIMITER in uid and CHAIN_DELIMITER not in uid
  38. return dht.run_coroutine(
  39. partial(
  40. _declare_active_modules,
  41. uids=uids,
  42. expiration_time=expiration_time,
  43. state=state,
  44. throughput=throughput,
  45. ),
  46. return_future=not wait,
  47. )
  48. async def _declare_active_modules(
  49. dht: DHT,
  50. node: DHTNode,
  51. uids: List[ModuleUID],
  52. expiration_time: DHTExpiration,
  53. state: ServerState,
  54. throughput: float,
  55. ) -> Dict[ModuleUID, bool]:
  56. num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
  57. return await node.store_many(
  58. keys=uids,
  59. subkeys=[dht.peer_id.to_base58()] * len(uids),
  60. values=[(state.value, throughput)] * len(uids),
  61. expiration_time=expiration_time,
  62. num_workers=num_workers,
  63. )
def get_remote_module(
    dht: DHT,
    uid_or_uids: Union[ModuleUID, List[ModuleUID]],
    expiration_time: Optional[DHTExpiration] = None,
    return_future: bool = False,
) -> Union[List[Optional[src.RemoteTransformerBlock]], MPFuture[List[Optional[src.RemoteTransformerBlock]]]]:
    """
    Find one or more remote transformer blocks by their module UIDs.

    :param uid_or_uids: find one or more modules with these ids from across the DHT
    :param expiration_time: if specified, return modules that expire no sooner than this (based on get_dht_time)
    :param return_future: if False (default), return when finished. Otherwise return MPFuture and run in background.
    :returns: a list of [RemoteTransformerBlock if found else None]
    """
    single_uid = isinstance(uid_or_uids, ModuleUID)
    uids = [uid_or_uids] if single_uid else uid_or_uids
    # Fetch raw module infos inside the DHT process; `infos` is either the list
    # itself or an MPFuture of it, depending on `return_future`.
    infos = dht.run_coroutine(
        partial(_get_remote_module_infos, uids=uids, expiration_time=expiration_time), return_future
    )
    if return_future:

        async def _unpack(infos_future: MPFuture, dht: DHT):
            # Await the infos, replicate the p2p handle, and wrap each found
            # module; `single_uid` is captured from the enclosing call.
            p2p = await dht.replicate_p2p()
            modules = _create_remote_modules_from_infos(await infos_future, p2p)
            return modules[0] if single_uid else modules

        # Run the unpacking in RemoteExpertWorker's background loop and hand
        # the caller a future for the final modules.
        return RemoteExpertWorker.run_coroutine(_unpack(infos, dht), return_future)

    # Synchronous path: `infos` is already the resolved list here.
    p2p = RemoteExpertWorker.run_coroutine(dht.replicate_p2p())
    modules = _create_remote_modules_from_infos(infos, p2p)
    return modules[0] if single_uid else modules
  90. def get_remote_module_infos(
  91. dht: DHT,
  92. uid_or_uids: Union[ModuleUID, List[ModuleUID]],
  93. expiration_time: Optional[DHTExpiration] = None,
  94. ) -> List[Optional[RemoteModuleInfo]]:
  95. single_uid = isinstance(uid_or_uids, ModuleUID)
  96. uids = [uid_or_uids] if single_uid else uid_or_uids
  97. infos = dht.run_coroutine(
  98. partial(_get_remote_module_infos, uids=uids, expiration_time=expiration_time), return_future=False
  99. )
  100. return infos[0] if single_uid else infos
async def _get_remote_module_infos(
    dht: DHT, node: DHTNode, uids: List[ModuleUID], expiration_time: Optional[DHTExpiration]
) -> List[Optional[RemoteModuleInfo]]:
    """
    Query the DHT for each uid and parse per-peer (state, throughput) entries (runs in the DHT process).

    :returns: one RemoteModuleInfo per uid with at least one valid server, else None at that position
    """
    if expiration_time is None:
        # Default: only records that are still alive at this moment.
        expiration_time = get_dht_time()
    num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
    found: Dict[ModuleUID, DHTValue] = await node.get_many(uids, expiration_time, num_workers=num_workers)
    modules: List[Optional[RemoteModuleInfo]] = [None] * len(uids)
    for i, uid in enumerate(uids):
        metadata = found[uid]
        if metadata is None or not isinstance(metadata.value, dict):
            # A missing key is normal (leave None); a malformed value is logged.
            if metadata is not None:
                logger.error(f"Incorrect metadata for {uid}: {metadata}")
            continue
        servers = {}
        for peer_id, server_info in metadata.value.items():
            try:
                # NOTE: peer_id is deliberately rebound here; if from_base58 fails,
                # the except below logs the raw base58 string instead.
                peer_id = PeerID.from_base58(peer_id)
                state, throughput = server_info.value
                if not (
                    isinstance(state, int)
                    and isinstance(throughput, float)
                    and math.isfinite(throughput)
                    and throughput >= 0.0
                ):
                    # Raised into the local except so one bad entry is logged
                    # and skipped without discarding the peer's siblings.
                    raise ValueError(f"Invalid server info: {server_info}")
                servers[peer_id] = ServerInfo(ServerState(state), throughput)
            except (TypeError, ValueError) as e:
                logger.error(f"Incorrect peer entry for uid={uid}, peer_id={peer_id}: {e}")
        if servers:
            modules[i] = RemoteModuleInfo(uid, servers)
    return modules
  133. def _create_remote_modules_from_infos(
  134. infos: Sequence[Optional[RemoteModuleInfo]], p2p: P2P
  135. ) -> List[Optional[src.RemoteTransformerBlock]]:
  136. modules: List[Optional[src.RemoteTransformerBlock]] = []
  137. for info in infos:
  138. if info is not None:
  139. modules.append(src.RemoteTransformerBlock(info, p2p))
  140. else:
  141. modules.append(None)
  142. return modules