throughput.py

import fcntl
import json
import os
import subprocess
import time
from hashlib import sha256
from pathlib import Path
from typing import Optional, Union

import torch
from hivemind.utils.logging import get_logger, use_hivemind_log_handler

from petals.bloom.block import BloomBlock
from petals.bloom.model import BloomConfig
from petals.bloom.ops import build_alibi_tensor
from petals.server.block_utils import resolve_block_dtype
from petals.utils.convert_8bit import replace_8bit_linear
from petals.utils.disk_cache import DEFAULT_CACHE_DIR

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)


def get_host_throughput(
    config: BloomConfig,
    device: torch.device,
    dtype: Union[str, torch.dtype],
    *,
    load_in_8bit: bool,
    force_eval: bool = False,
    cache_dir: Optional[str] = None,
) -> float:
    """Return the host throughput, reusing a cached measurement unless force_eval is set."""
    dtype = resolve_block_dtype(config, dtype)

    if cache_dir is None:
        cache_dir = DEFAULT_CACHE_DIR
    lock_path = Path(cache_dir, "throughput.lock")
    cache_path = Path(cache_dir, "throughput_v2.json")

    # We use the system-wide lock since only one process at a time can measure the host throughput
    os.makedirs(lock_path.parent, exist_ok=True)
    with open(lock_path, "wb") as lock_fd:
        logger.info("Loading throughput info")
        fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX)
        # The OS will release the lock when lock_fd is closed or the process is killed

        cache_key = f"config_{sha256(str(config).encode()).hexdigest()[-16:]}"
        cache_key += f"_device_{get_device_name(device).replace(' ', '_')}"
        cache_key += f"_dtype_{get_dtype_name(dtype, load_in_8bit)}"

        cache = {}
        try:
            if not force_eval and os.path.exists(cache_path):
                with open(cache_path) as cache_fd:
                    cache = json.load(cache_fd)
                assert isinstance(cache, dict)
        except Exception:
            logger.exception(f"Failed to read throughput info from {cache_path}")
            cache = {}

        if cache_key not in cache:
            cache[cache_key] = measure_throughput_info(config, device, dtype, load_in_8bit=load_in_8bit)

            try:
                os.makedirs(cache_path.parent, exist_ok=True)
                with open(cache_path, "w") as cache_fd:
                    json.dump(cache, cache_fd)
            except Exception:
                logger.exception(f"Failed to save throughput info in {cache_path}")

    return cache[cache_key]
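
# Illustrative cache layout (hypothetical key and value, not taken from a real run):
# throughput_v2.json maps each cache key built above to the measured throughput in RPS, e.g.
#   {"config_0123abcd4567ef89_device_NVIDIA_A100-SXM4-40GB_GPU_dtype_torch.bfloat16": 312.4}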


def measure_throughput_info(
    config: BloomConfig,
    device: torch.device,
    dtype: torch.dtype,
    *,
    load_in_8bit: bool,
) -> float:
    """Measure network and compute throughput in forward pass tokens per second"""

    logger.info(
        "Measuring network and compute throughput. This takes about a minute and will be cached for future runs"
    )
    return min(
        measure_network_rps(config),
        measure_compute_rps(config, device, dtype, load_in_8bit=load_in_8bit),
    )
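
# For instance (illustrative numbers only): if the network sustains ~400 RPS but the
# device only ~250 RPS, the host is compute-bound and min() reports 250 RPS, since a
# served block cannot process more requests than its slowest stage allows.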


def measure_network_rps(config: BloomConfig) -> float:
    """Measure network throughput in requests per second using the speed test CLI."""
    proc = subprocess.run("python3 -m petals.cli.speed_test --json", shell=True, capture_output=True)
    if proc.returncode != 0:
        raise RuntimeError(f"Failed to measure network throughput (stdout: {proc.stdout}, stderr: {proc.stderr})")
    network_info = json.loads(proc.stdout)

    bits_per_request = config.hidden_size * 16  # Clients usually send 16-bit tensors for forward/backward
    network_rps = min(network_info["download"], network_info["upload"]) / bits_per_request

    logger.info(
        f"Network throughput: "
        f"{network_info['download'] / 1e6:.2f} Mbit/s on download, "
        f"{network_info['upload'] / 1e6:.2f} Mbit/s on upload, "
        f"{network_rps:.1f} RPS"
    )
    return network_rps
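
# Worked example with assumed numbers: for BLOOM-176B, hidden_size = 14336, so
# bits_per_request = 14336 * 16 = 229,376 bits. On a symmetric 100 Mbit/s link,
# network_rps ≈ 1e8 / 229376 ≈ 436 requests per second.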


def measure_compute_rps(
    config: BloomConfig,
    device: torch.device,
    dtype: torch.dtype,
    *,
    load_in_8bit: bool,
    n_tokens: int = 16,
    n_steps: int = 500,
    layer_index: int = 0,
) -> float:
    """Measure compute throughput in requests per second by timing forward passes of a single block."""
    with torch.inference_mode():
        block = BloomBlock(config, layer_index).to(dtype)
        if load_in_8bit:
            block = replace_8bit_linear(block)
        block = block.to(device)

        cache = None
        elapsed = 0
        for step in range(n_steps + 1):
            dummy_input = torch.randn(n_tokens, 1, config.hidden_size, device=device, dtype=dtype)
            alibi = build_alibi_tensor(step + 1, config.num_attention_heads, device=device, dtype=dtype)

            start_time = time.perf_counter()
            _, cache = block.forward(dummy_input, alibi=alibi, use_cache=True, layer_past=cache)
            if step >= 1:  # Skip the 1st step to exclude the initialization time
                elapsed += time.perf_counter() - start_time
        device_rps = n_steps * n_tokens / elapsed

    logger.info(
        f"Forward pass throughput ({get_device_name(device)}, {get_dtype_name(dtype, load_in_8bit)}): "
        f"{device_rps:.1f} RPS"
    )
    return device_rps


def get_device_name(device: torch.device) -> str:
    return f"{torch.cuda.get_device_name(device)} GPU" if device.type == "cuda" else "CPU"


def get_dtype_name(dtype: torch.dtype, load_in_8bit: bool) -> str:
    return "8-bit" if load_in_8bit else str(dtype)
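

# --- Hedged usage sketch (not part of the original module) ---------------------
# A minimal example of how a server process might query its own throughput.
# The config source and dtype below are assumptions for illustration; a real
# Petals server builds its BloomConfig from the model it is about to serve.
if __name__ == "__main__":
    config = BloomConfig()  # assumption: default config, only for a quick local check
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    rps = get_host_throughput(config, device, torch.bfloat16, load_in_8bit=False)
    print(f"Host throughput: {rps:.1f} RPS")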