# benchmark_dht.py
  1. import argparse
  2. import random
  3. import time
  4. from tqdm import trange
  5. import hivemind
  6. from hivemind.moe.server import declare_experts, get_experts
  7. from hivemind.utils.limits import increase_file_limit
  8. logger = hivemind.get_logger(__name__)
  9. def random_endpoint() -> hivemind.Endpoint:
  10. return f"{random.randint(0, 256)}.{random.randint(0, 256)}.{random.randint(0, 256)}." \
  11. f"{random.randint(0, 256)}:{random.randint(0, 65535)}"
  12. def benchmark_dht(num_peers: int, initial_peers: int, num_experts: int, expert_batch_size: int, random_seed: int,
  13. wait_after_request: float, wait_before_read: float, wait_timeout: float, expiration: float):
  14. random.seed(random_seed)
  15. logger.info("Creating peers...")
  16. peers = []
  17. for _ in trange(num_peers):
  18. neighbors = sum([peer.get_visible_maddrs()
  19. for peer in random.sample(peers, min(initial_peers, len(peers)))], [])
  20. peer = hivemind.DHT(initial_peers=neighbors, start=True, wait_timeout=wait_timeout)
  21. peers.append(peer)
  22. store_peer, get_peer = peers[-2:]
  23. expert_uids = list(set(f"expert.{random.randint(0, 999)}.{random.randint(0, 999)}.{random.randint(0, 999)}"
  24. for _ in range(num_experts)))
  25. logger.info(f"Sampled {len(expert_uids)} unique ids (after deduplication)")
  26. random.shuffle(expert_uids)
  27. logger.info(f"Storing experts to dht in batches of {expert_batch_size}...")
  28. successful_stores = total_stores = total_store_time = 0
  29. benchmark_started = time.perf_counter()
  30. endpoints = []
  31. for start in trange(0, num_experts, expert_batch_size):
  32. store_start = time.perf_counter()
  33. endpoints.append(random_endpoint())
  34. store_ok = declare_experts(store_peer, expert_uids[start: start + expert_batch_size], endpoints[-1],
  35. expiration=expiration)
  36. successes = store_ok.values()
  37. total_store_time += time.perf_counter() - store_start
  38. total_stores += len(successes)
  39. successful_stores += sum(successes)
  40. time.sleep(wait_after_request)
  41. logger.info(
  42. f"Store success rate: {successful_stores / total_stores * 100:.1f}% ({successful_stores} / {total_stores})")
  43. logger.info(f"Mean store time: {total_store_time / total_stores:.5}, Total: {total_store_time:.5}")
  44. time.sleep(wait_before_read)
  45. if time.perf_counter() - benchmark_started > expiration:
  46. logger.warning("All keys expired before benchmark started getting them. Consider increasing expiration_time")
  47. successful_gets = total_get_time = 0
  48. for start in trange(0, len(expert_uids), expert_batch_size):
  49. get_start = time.perf_counter()
  50. get_result = get_experts(get_peer, expert_uids[start: start + expert_batch_size])
  51. total_get_time += time.perf_counter() - get_start
  52. for i, expert in enumerate(get_result):
  53. if expert is not None and expert.uid == expert_uids[start + i] \
  54. and expert.endpoint == endpoints[start // expert_batch_size]:
  55. successful_gets += 1
  56. if time.perf_counter() - benchmark_started > expiration:
  57. logger.warning("keys expired midway during get requests. If that isn't desired, increase expiration_time param")
  58. logger.info(
  59. f"Get success rate: {successful_gets / len(expert_uids) * 100:.1f} ({successful_gets} / {len(expert_uids)})")
  60. logger.info(f"Mean get time: {total_get_time / len(expert_uids):.5f}, Total: {total_get_time:.5f}")
  61. alive_peers = [peer.is_alive() for peer in peers]
  62. logger.info(f"Node survival rate: {len(alive_peers) / len(peers) * 100:.3f}%")
  63. if __name__ == "__main__":
  64. parser = argparse.ArgumentParser()
  65. parser.add_argument('--num_peers', type=int, default=32, required=False)
  66. parser.add_argument('--initial_peers', type=int, default=1, required=False)
  67. parser.add_argument('--num_experts', type=int, default=256, required=False)
  68. parser.add_argument('--expert_batch_size', type=int, default=32, required=False)
  69. parser.add_argument('--expiration', type=float, default=300, required=False)
  70. parser.add_argument('--wait_after_request', type=float, default=0, required=False)
  71. parser.add_argument('--wait_before_read', type=float, default=0, required=False)
  72. parser.add_argument('--wait_timeout', type=float, default=5, required=False)
  73. parser.add_argument('--random_seed', type=int, default=random.randint(1, 1000))
  74. parser.add_argument('--increase_file_limit', action="store_true")
  75. args = vars(parser.parse_args())
  76. if args.pop('increase_file_limit', False):
  77. increase_file_limit()
  78. benchmark_dht(**args)