dht_utils.py

  1. """
  2. Utilities for declaring and retrieving active model layers using a shared DHT.
  3. """
  4. from __future__ import annotations
  5. import math
  6. from functools import partial
  7. from typing import Dict, List, Optional, Sequence, Union
  8. from hivemind.dht import DHT, DHTNode, DHTValue
  9. from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
  10. from hivemind.p2p import PeerID
  11. from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger, use_hivemind_log_handler
  12. import src
  13. from src.data_structures import CHAIN_DELIMITER, UID_DELIMITER, ModuleUID, RemoteModuleInfo, ServerInfo, ServerState
  14. use_hivemind_log_handler("in_root_logger")
  15. logger = get_logger(__file__)


def declare_active_modules(
    dht: DHT,
    uids: Sequence[ModuleUID],
    expiration_time: DHTExpiration,
    state: ServerState,
    throughput: float,
    wait: bool = True,
) -> Union[Dict[ModuleUID, bool], MPFuture[Dict[ModuleUID, bool]]]:
    """
    Declare that your node serves the specified modules; update timestamps if declared previously.

    :param uids: a list of module ids to declare
    :param wait: if True, waits for the declaration to finish, otherwise runs it in the background
    :param throughput: specify your performance in terms of compute throughput
    :param expiration_time: declared modules will be visible for this many seconds
    :returns: if wait, returns store status for every key (True = store succeeded, False = store rejected)
    """
    if isinstance(uids, str):
        uids = [uids]
    if not isinstance(uids, list):
        uids = list(uids)
    for uid in uids:
        assert isinstance(uid, ModuleUID) and UID_DELIMITER in uid and CHAIN_DELIMITER not in uid
    return dht.run_coroutine(
        partial(
            _declare_active_modules,
            uids=uids,
            expiration_time=expiration_time,
            state=state,
            throughput=throughput,
        ),
        return_future=not wait,
    )
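
# A minimal usage sketch for declare_active_modules. The DHT below is a fresh
# standalone node, the "bloom" UIDs are hypothetical (any UID containing
# UID_DELIMITER and no CHAIN_DELIMITER works), and ServerState.ONLINE with
# throughput=1.0 are placeholder values, not defaults taken from this module:
#
#   dht = DHT(start=True)
#   declare_active_modules(
#       dht,
#       uids=[f"bloom{UID_DELIMITER}0", f"bloom{UID_DELIMITER}1"],
#       expiration_time=get_dht_time() + 60,
#       state=ServerState.ONLINE,
#       throughput=1.0,
#   )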


async def _declare_active_modules(
    dht: DHT,
    node: DHTNode,
    uids: List[ModuleUID],
    expiration_time: DHTExpiration,
    state: ServerState,
    throughput: float,
) -> Dict[ModuleUID, bool]:
    num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
    # Store one record per module UID, keyed by this peer's id as the subkey, so that
    # multiple servers can declare the same module without overwriting each other
    return await node.store_many(
        keys=uids,
        subkeys=[dht.peer_id.to_base58()] * len(uids),
        values=[(state.value, throughput)] * len(uids),
        expiration_time=expiration_time,
        num_workers=num_workers,
    )
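
# A sketch of the resulting DHT layout implied by store_many above (how hivemind
# wraps values and expirations internally may differ):
#
#   "bloom.3" -> {"<peer_id base58>": (state.value, throughput), ...}
#
# _get_remote_module_infos below parses server entries back out of this shape.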


def get_remote_sequence(
    dht: DHT,
    start: int,
    stop: int,
    config: src.DistributedBloomConfig,
    dht_prefix: Optional[str] = None,
    return_future: bool = False,
) -> Union[src.RemoteSequential, MPFuture]:
    return RemoteExpertWorker.run_coroutine(
        _get_remote_sequence(dht, start, stop, config, dht_prefix), return_future=return_future
    )
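
# A usage sketch: fetch blocks [0, 4) as one RemoteSequential. The checkpoint
# name is a hypothetical placeholder for whatever model the config comes from:
#
#   config = src.DistributedBloomConfig.from_pretrained("<model-name>")
#   sequential = get_remote_sequence(dht, 0, 4, config)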


async def _get_remote_sequence(
    dht: DHT,
    start: int,
    stop: int,
    config: src.DistributedBloomConfig,
    dht_prefix: Optional[str] = None,
) -> src.RemoteSequential:
    uids = [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(start, stop)]
    p2p = await dht.replicate_p2p()
    manager = src.RemoteSequenceManager(dht, uids, p2p)
    return src.RemoteSequential(config, dht, dht_prefix, p2p, manager)


def get_remote_module(
    dht: DHT,
    uid_or_uids: Union[ModuleUID, List[ModuleUID]],
    config: src.DistributedBloomConfig,
    dht_prefix: Optional[str] = None,
    return_future: bool = False,
) -> Union[Union[src.RemoteTransformerBlock, List[src.RemoteTransformerBlock]], MPFuture]:
    """
    :param uid_or_uids: find one or more modules with these ids from across the DHT
    :param config: model config, usually taken by .from_pretrained(MODEL_NAME)
    :param return_future: if False (default), return when finished. Otherwise return MPFuture and run in background.
    :returns: a single RemoteTransformerBlock if a single uid was given, otherwise a list of RemoteTransformerBlocks
    """
    return RemoteExpertWorker.run_coroutine(
        _get_remote_module(dht, uid_or_uids, config, dht_prefix), return_future=return_future
    )
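
# A usage sketch: fetch the model's first block by its UID. The checkpoint name
# is again a hypothetical placeholder:
#
#   config = src.DistributedBloomConfig.from_pretrained("<model-name>")
#   block = get_remote_module(dht, f"{config.dht_prefix}{UID_DELIMITER}0", config)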


async def _get_remote_module(
    dht: DHT,
    uid_or_uids: Union[ModuleUID, List[ModuleUID]],
    config: src.DistributedBloomConfig,
    dht_prefix: Optional[str] = None,
) -> Union[src.RemoteTransformerBlock, List[src.RemoteTransformerBlock]]:
    single_uid = isinstance(uid_or_uids, ModuleUID)
    uids = [uid_or_uids] if single_uid else uid_or_uids
    p2p = await dht.replicate_p2p()
    managers = (src.RemoteSequenceManager(dht, [uid], p2p) for uid in uids)
    modules = [
        src.RemoteTransformerBlock(config, dht, dht_prefix=dht_prefix, p2p=p2p, sequence_manager=m) for m in managers
    ]
    return modules[0] if single_uid else modules


def get_remote_module_infos(
    dht: DHT,
    uid_or_uids: Union[ModuleUID, List[ModuleUID]],
    expiration_time: Optional[DHTExpiration] = None,
) -> Union[Optional[RemoteModuleInfo], List[Optional[RemoteModuleInfo]]]:
    single_uid = isinstance(uid_or_uids, ModuleUID)
    uids = [uid_or_uids] if single_uid else uid_or_uids
    infos = dht.run_coroutine(
        partial(_get_remote_module_infos, uids=uids, expiration_time=expiration_time), return_future=False
    )
    return infos[0] if single_uid else infos
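
# A usage sketch: look up which servers currently host the first two blocks
# (reusing a config as in the sketches above). The RemoteModuleInfo / ServerInfo
# field names follow the constructor calls in _get_remote_module_infos below:
#
#   infos = get_remote_module_infos(
#       dht, [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(2)]
#   )
#   for info in infos:
#       if info is not None:
#           print(info.uid, {peer: server.state for peer, server in info.servers.items()})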


async def _get_remote_module_infos(
    dht: DHT, node: DHTNode, uids: List[ModuleUID], expiration_time: Optional[DHTExpiration]
) -> List[Optional[RemoteModuleInfo]]:
    if expiration_time is None:
        expiration_time = get_dht_time()
    num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
    found: Dict[ModuleUID, DHTValue] = await node.get_many(uids, expiration_time, num_workers=num_workers)

    modules: List[Optional[RemoteModuleInfo]] = [None] * len(uids)
    for i, uid in enumerate(uids):
        metadata = found[uid]
        if metadata is None or not isinstance(metadata.value, dict):
            if metadata is not None:
                logger.error(f"Incorrect metadata for {uid}: {metadata}")
            continue
        servers = {}
        for peer_id, server_info in metadata.value.items():
            try:
                peer_id = PeerID.from_base58(peer_id)
                state, throughput = server_info.value
                # Reject entries whose (state, throughput) pair is malformed
                if not (
                    isinstance(state, int)
                    and isinstance(throughput, float)
                    and math.isfinite(throughput)
                    and throughput >= 0.0
                ):
                    raise ValueError(f"Invalid server info: {server_info}")
                servers[peer_id] = ServerInfo(ServerState(state), throughput)
            except (TypeError, ValueError) as e:
                logger.error(f"Incorrect peer entry for uid={uid}, peer_id={peer_id}: {e}")
        if servers:
            modules[i] = RemoteModuleInfo(uid, servers)
    return modules