test_compression.py

import multiprocessing as mp
from ctypes import c_int32

import pytest
import torch
import torch.nn as nn

import hivemind
from hivemind.compression import (
    CompressionBase,
    CompressionInfo,
    Float16Compression,
    NoCompression,
    PerTensorCompression,
    RoleAdaptiveCompression,
    SizeAdaptiveCompression,
    Uniform8BitQuantization,
    deserialize_torch_tensor,
    serialize_torch_tensor,
)
from hivemind.compression.adaptive import AdaptiveCompressionBase
from hivemind.proto.runtime_pb2 import CompressionType

from test_utils.dht_swarms import launch_dht_instances
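

# Round-trip a random tensor through every codec and bound the mean squared
# reconstruction error: alpha (5e-8) for the 16-bit schemes, beta (8e-4) for
# the 8-bit ones. The all-zeros tensor is a degenerate input that checks no
# codec emits NaN/Inf (e.g. from a zero scale or variance).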
@pytest.mark.forked
def test_tensor_compression(size=(128, 128, 64), alpha=5e-08, beta=0.0008):
    torch.manual_seed(0)
    X = torch.randn(*size)
    assert torch.allclose(deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.NONE)), X)
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.MEANSTD_16BIT)) - X
    assert error.square().mean() < alpha
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.FLOAT16)) - X
    assert error.square().mean() < alpha
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.QUANTILE_8BIT)) - X
    assert error.square().mean() < beta
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.UNIFORM_8BIT)) - X
    assert error.square().mean() < beta

    zeros = torch.zeros(5, 5)
    for compression_type in CompressionType.values():
        assert deserialize_torch_tensor(serialize_torch_tensor(zeros, compression_type)).isfinite().all()
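

# Serialized tensors travel over the network as a stream of fixed-size chunks.
# Split a serialized tensor with split_for_streaming, verify the expected chunk
# count (ceil division of the buffer length), reassemble with
# combine_from_streaming, and check the round trip across chunk sizes, dtypes,
# and shapes, including 0-d scalars.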
@pytest.mark.forked
def test_serialize_tensor():
    def _check(tensor, compression, rtol=1e-5, atol=1e-8, chunk_size=30 * 1024):
        serialized_tensor = serialize_torch_tensor(tensor, compression)
        chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
        assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
        restored = hivemind.combine_from_streaming(chunks)
        assert torch.allclose(deserialize_torch_tensor(restored), tensor, rtol=rtol, atol=atol)

    tensor = torch.randn(512, 12288)
    for chunk_size in [1024, 64 * 1024, 64 * 1024 + 1, 10**9]:
        _check(tensor, CompressionType.NONE, chunk_size=chunk_size)
    _check(tensor, CompressionType.FLOAT16, rtol=0.0, atol=1e-2)
    _check(torch.randint(0, 100, (512, 1, 1)), CompressionType.NONE)
    _check(torch.tensor(1.0), CompressionType.NONE)
    _check(torch.tensor(1.0), CompressionType.FLOAT16)
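

# Two DecentralizedAverager peers average two tensors, each tensor with its own
# codec via PerTensorCompression. The averaged result must depend only on the
# codec assigned to that tensor (hence the pairwise allclose / not-allclose
# checks), and the error vs. the exact average must match the codec's precision.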
@pytest.mark.forked
def test_allreduce_compression():
    """This test ensures that compression works correctly when multiple tensors have different compression types."""
    tensors1 = [torch.linspace(0, 500, 1000) ** 0.5, torch.randn(1000)]
    tensors2 = [torch.linspace(300, 800, 1000) ** 0.5, torch.randn(1000)]
    results = {}
    FLOAT16, UINT8 = Float16Compression(), Uniform8BitQuantization()

    for compression_type_pair in [(FLOAT16, FLOAT16), (FLOAT16, UINT8), (UINT8, FLOAT16), (UINT8, UINT8)]:
        dht_instances = launch_dht_instances(2)
        averager1 = hivemind.averaging.DecentralizedAverager(
            [x.clone() for x in tensors1],
            dht=dht_instances[0],
            compression=PerTensorCompression(compression_type_pair),
            client_mode=True,
            target_group_size=2,
            prefix="mygroup",
            start=True,
        )
        averager2 = hivemind.averaging.DecentralizedAverager(
            [x.clone() for x in tensors2],
            dht=dht_instances[1],
            compression=PerTensorCompression(compression_type_pair),
            target_group_size=2,
            prefix="mygroup",
            start=True,
        )

        for future in averager1.step(wait=False), averager2.step(wait=False):
            future.result()

        with averager1.get_tensors() as averaged_tensors:
            results[compression_type_pair] = averaged_tensors

        for instance in [averager1, averager2] + dht_instances:
            instance.shutdown()

    assert torch.allclose(results[UINT8, FLOAT16][0], results[UINT8, UINT8][0])
    assert torch.allclose(results[UINT8, FLOAT16][1], results[FLOAT16, FLOAT16][1])
    assert torch.allclose(results[UINT8, UINT8][1], results[FLOAT16, UINT8][1])
    assert torch.allclose(results[FLOAT16, UINT8][0], results[FLOAT16, FLOAT16][0])
    assert not torch.allclose(results[UINT8, FLOAT16][1], results[UINT8, UINT8][1])
    assert not torch.allclose(results[UINT8, FLOAT16][0], results[FLOAT16, FLOAT16][0])
    assert not torch.allclose(results[UINT8, UINT8][0], results[FLOAT16, UINT8][0])
    assert not torch.allclose(results[FLOAT16, UINT8][1], results[FLOAT16, FLOAT16][1])

    reference = [(tensors1[i] + tensors2[i]) / 2 for i in range(len(tensors1))]
    for i in range(2):
        assert 0 < torch.mean(torch.square(results[FLOAT16, FLOAT16][i] - reference[i])).item() <= 1e-5
        assert 1e-5 < torch.mean(torch.square(results[UINT8, UINT8][i] - reference[i])).item() <= 1e-2
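

# Wrapper that delegates to an underlying codec while counting compress() calls
# and recording the largest part size it has seen. The counters live in shared
# memory (mp.Value) because averaging runs in a background process.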
class TrackedCompression(AdaptiveCompressionBase):
    def __init__(self, compression: CompressionBase):
        self.compression = compression
        self.mp_counter, self.mp_part_size = mp.Value(c_int32, 0), mp.Value(c_int32, 0)
        super().__init__()

    def choose_compression(self, info: CompressionInfo) -> CompressionBase:
        return self.compression

    def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False):
        self.mp_counter.value += 1
        if info.part_size is not None:
            self.mp_part_size.value = max(self.mp_part_size.value, info.part_size)
        return self.compression.compress(tensor, info=info, allow_inplace=allow_inplace)
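

# Parameter sizes straddle the SizeAdaptiveCompression thresholds used below
# (500 and 1_000 elements) and the 5_000-byte part size, so both sides of each
# threshold and the tensor-part splitting are exercised.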
def make_params():
    return [
        nn.Parameter(x)
        for x in (
            torch.randn([]),
            torch.randn(1),
            torch.randn(100),
            torch.randn(1_000),
            torch.randn(5_000),
            torch.randn(10_000),
        )
    ]
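

# End-to-end check of the adaptive compression rules: RoleAdaptiveCompression
# routes by tensor role (parameter / gradient / optimizer statistic), and the
# nested SizeAdaptiveCompression further splits gradients by element count.
# The TrackedCompression counters verify exactly which codec handled how many
# tensor parts.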
@pytest.mark.forked
def test_adaptive_compression():
    UINT8 = TrackedCompression(Uniform8BitQuantization())
    FLOAT16 = TrackedCompression(Float16Compression())
    FLOAT32 = TrackedCompression(NoCompression())
    STATE_FP16 = TrackedCompression(Float16Compression())
    STATE_FP32 = TrackedCompression(NoCompression())

    averaging_compression_adaptive = RoleAdaptiveCompression(
        parameter=FLOAT16,
        gradient=SizeAdaptiveCompression(threshold=1_000, less=FLOAT16, greater_equal=UINT8),
        optimizer=FLOAT32,
        default=FLOAT32,
    )

    state_compression_adaptive = SizeAdaptiveCompression(
        threshold=500,
        less=STATE_FP32,
        greater_equal=STATE_FP16,
    )

    averager1 = hivemind.TrainingAverager(
        opt=torch.optim.Adam(make_params()),
        average_parameters=True,
        average_gradients=True,
        average_opt_statistics=("exp_avg",),
        compression=averaging_compression_adaptive,
        state_compression=state_compression_adaptive,
        prefix="test_avgr",
        target_group_size=2,
        part_size_bytes=5_000,
        start=True,
        dht=hivemind.DHT(start=True),
    )

    averager2 = hivemind.TrainingAverager(
        opt=torch.optim.Adam(make_params()),
        average_parameters=True,
        average_gradients=True,
        average_opt_statistics=("exp_avg",),
        compression=averaging_compression_adaptive,
        state_compression=state_compression_adaptive,
        prefix="test_avgr",
        target_group_size=2,
        part_size_bytes=5_000,
        start=True,
        dht=hivemind.DHT(initial_peers=averager1.dht.get_visible_maddrs(), start=True),
    )

    futures = [averager1.step(wait=False), averager2.step(wait=False)]
    for future in futures:
        future.result()

    assert UINT8.mp_counter.value == 4  # half of the gradients: 3 tensors >= threshold, one of them split into two parts
    assert UINT8.mp_part_size.value == 5_000  # single-byte elements: 5_000 bytes -> 5_000 elements per part
    assert FLOAT16.mp_counter.value == 13  # parameters and the other half of the gradients
    assert FLOAT16.mp_part_size.value == 2_500  # two-byte elements: 5_000 bytes -> 2_500 elements per part
    assert FLOAT32.mp_counter.value == 16  # optimizer statistics
    assert FLOAT32.mp_part_size.value == 1_250  # four-byte elements: 5_000 bytes -> 1_250 elements per part

    averager1.load_state_from_peers()
    assert STATE_FP16.mp_counter.value == STATE_FP32.mp_counter.value == 9
    assert STATE_FP16.mp_part_size.value == STATE_FP32.mp_part_size.value == 0  # state transfer is not partitioned