key_manager.py

import random
import re
from typing import List, Optional, Tuple

import numpy as np

from hivemind.averaging.group_info import GroupInfo
from hivemind.dht import DHT
from hivemind.p2p import PeerID
from hivemind.utils import DHTExpiration, get_logger

GroupKey = str
GROUP_PATTERN = re.compile("^(([^.])+)[.]0b[01]*$")  # e.g. bert_exp4_averaging.0b01001101
DEFAULT_NUM_BUCKETS = 256
logger = get_logger(__name__)


def is_valid_group(maybe_group: str) -> bool:
    """A group key must be a dot-free prefix (the group type) followed by '.0b' and a binary bucket index"""
    return bool(GROUP_PATTERN.fullmatch(maybe_group))
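
# Examples of the key format accepted by GROUP_PATTERN (illustrative values):
#   is_valid_group("my_averager.0b0110")  -> True
#   is_valid_group("my_averager.0b")      -> True (an empty bucket index is allowed)
#   is_valid_group("my_averager")         -> False (missing the ".0b..." suffix)
#   is_valid_group("my.averager.0b01")    -> False (the prefix may not contain dots)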


class GroupKeyManager:
    """
    Utility class that declares and fetches averaging-related keys using a DHT
    """

    def __init__(
        self,
        dht: DHT,
        prefix: str,
        initial_group_bits: str,
        target_group_size: Optional[int],
    ):
        assert all(bit in "01" for bit in initial_group_bits)
        if target_group_size is not None and not is_power_of_two(target_group_size):
            logger.warning("It is recommended to set target_group_size to a power of 2")

        self.dht, self.prefix, self.group_bits = dht, prefix, initial_group_bits
        self.target_group_size = target_group_size
        self.peer_id = dht.peer_id

    @property
    def current_key(self) -> GroupKey:
        return f"{self.prefix}.0b{self.group_bits}"
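
    # Usage sketch (assumed setup, not taken from this module): `dht` stands for a running hivemind.DHT.
    # With prefix "my_averager" and initial_group_bits "0110", the current bucket key is
    # "my_averager.0b0110"; peers that share this key get matched into the same allreduce bucket.
    #
    #   manager = GroupKeyManager(dht, prefix="my_averager", initial_group_bits="0110", target_group_size=16)
    #   manager.current_key  # -> "my_averager.0b0110"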

    async def declare_averager(
        self, group_key: GroupKey, peer_id: PeerID, expiration_time: float, looking_for_group: bool = True
    ) -> bool:
        """
        Add the averager to a given allreduce bucket, or remove it from that bucket

        :param group_key: allreduce group key, e.g. my_averager.0b011011101
        :param peer_id: averager public peer_id for incoming requests
        :param expiration_time: intent to run allreduce before this timestamp
        :param looking_for_group: by default (True), declare the averager as "looking for group" under the given key;
          if False, mark the averager as no longer looking for a group (e.g. because it has already been matched)
        :return: True if declared, False if the declaration was rejected by DHT peers
        :note: when leaving (i.e. looking_for_group=False), please specify the same expiration_time as when entering
        :note: setting looking_for_group=False does *not* guarantee that others will immediately stop querying you
        """
        expiration_time = expiration_time if looking_for_group else float(np.nextafter(expiration_time, float("inf")))
        return await self.dht.store(
            key=group_key,
            subkey=peer_id.to_bytes(),
            value=looking_for_group,
            expiration_time=expiration_time,
            return_future=True,
        )
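
    # Sketch of declaring and later leaving a bucket, continuing the hypothetical `manager` example above;
    # `get_dht_time` is hivemind's DHT-synchronized clock (from hivemind.utils).
    #
    #   deadline = get_dht_time() + 30  # intend to start allreduce within ~30 seconds
    #   await manager.declare_averager(manager.current_key, manager.peer_id, deadline)
    #   ...  # matchmaking happens elsewhere
    #   await manager.declare_averager(manager.current_key, manager.peer_id, deadline, looking_for_group=False)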

    async def get_averagers(self, group_key: GroupKey, only_active: bool) -> List[Tuple[PeerID, DHTExpiration]]:
        """
        Find and return averagers that were declared with a given all-reduce key

        :param group_key: finds averagers that have this group key, e.g. my_averager.0b011011101
        :param only_active: if True, return only active averagers that are looking for group (i.e. with value = True);
            if False, return all averagers under a given group_key regardless of value
        :return: peer_ids and expirations of every matching averager
        """
        assert is_valid_group(group_key), f"Group key {group_key} is invalid, must follow {GROUP_PATTERN}"
        result = await self.dht.get(group_key, latest=True, return_future=True)
        if result is None or not isinstance(result.value, dict):
            logger.debug(f"Allreduce group not found: {group_key}, creating new group")
            return []
        averagers = []
        for key, looking_for_group in result.value.items():
            try:
                if only_active and not looking_for_group.value:
                    continue
                averagers.append((PeerID(key), looking_for_group.expiration_time))
            except Exception as e:
                logger.warning(f"Could not parse group key {key} ({looking_for_group}, exc={e})")
        return averagers
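
    # Sketch of fetching potential groupmates under the same key, keeping only peers that are still
    # looking for a group and whose declarations have not yet expired (uses get_dht_time as above):
    #
    #   candidates = await manager.get_averagers(manager.current_key, only_active=True)
    #   alive = [(peer, expiry) for peer, expiry in candidates if expiry > get_dht_time()]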

    async def update_key_on_group_assembled(self, group_info: GroupInfo, is_leader: bool = True):
        """this function is triggered every time an averager finds an allreduce group"""
        rng = random.Random(group_info.group_id)  # seeded with the shared group_id, so all peers draw the same permutation
        index = group_info.peer_ids.index(self.peer_id)
        num_buckets = self.target_group_size
        if num_buckets is None:
            num_buckets = next_power_of_two(group_info.group_size)
        # assign each peer a distinct bucket index, then append its binary form to the rolling group bits
        generalized_index = rng.sample(range(num_buckets), group_info.group_size)[index]
        nbits = int(np.ceil(np.log2(num_buckets)))
        new_bits = bin(generalized_index)[2:].rjust(nbits, "0")
        self.group_bits = (self.group_bits + new_bits)[-len(self.group_bits) :] if self.group_bits else ""
        logger.debug(f"{self.peer_id} - updated group key to {self.group_bits}")
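
    # Worked example with hypothetical values: suppose group_bits == "0110", target_group_size == 4
    # and the assembled group has 4 peers, so num_buckets == 4 and nbits == 2. If this peer draws
    # generalized_index == 2, then new_bits == "10" and the key bits become ("0110" + "10")[-4:] == "1010",
    # i.e. the oldest bits are shifted out and the next matchmaking round uses "my_averager.0b1010".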

    async def update_key_on_not_enough_peers(self):
        """this function is triggered whenever an averager fails to assemble a group within the timeout"""
        pass  # to be implemented in subclasses


def is_power_of_two(n):
    """Check whether n is a power of 2"""
    return (n != 0) and (n & (n - 1) == 0)


def next_power_of_two(n):
    """Round n up to the nearest power of 2"""
    return 1 if n == 0 else 2 ** (n - 1).bit_length()
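
# Sanity examples (illustrative): is_power_of_two(16) -> True, is_power_of_two(12) -> False;
# next_power_of_two(5) -> 8, next_power_of_two(8) -> 8, next_power_of_two(0) -> 1.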