import asyncio
import concurrent.futures
import multiprocessing as mp
import random
import time
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import pytest
import torch

import hivemind
from hivemind.optim.performance_ema import PerformanceEMA
from hivemind.proto.dht_pb2_grpc import DHTStub
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.proto.runtime_pb2_grpc import ConnectionHandlerStub
from hivemind.utils import DHTExpiration, HeapEntry, MSGPackSerializer, ValueWithExpiration
from hivemind.utils.asyncio import (
    achain,
    aenumerate,
    afirst,
    aiter_with_timeout,
    amap_in_executor,
    anext,
    as_aiter,
    asingle,
    azip,
    cancel_and_wait,
)
from hivemind.utils.compression import deserialize_torch_tensor, serialize_torch_tensor
from hivemind.utils.mpfuture import InvalidStateError


@pytest.mark.forked
def test_mpfuture_result():
    future = hivemind.MPFuture()

    def _proc(future):
        with pytest.raises(RuntimeError):
            future.result()  # only the creator process can await the result

        future.set_result(321)

    p = mp.Process(target=_proc, args=(future,))
    p.start()
    p.join()

    assert future.result() == 321
    assert future.exception() is None
    assert future.cancel() is False
    assert future.done() and not future.running() and not future.cancelled()

    future = hivemind.MPFuture()
    with pytest.raises(concurrent.futures.TimeoutError):
        future.result(timeout=1e-3)

    future.set_result(["abacaba", 123])
    assert future.result() == ["abacaba", 123]
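

# A minimal cross-process sketch (not part of the original suite): MPFuture
# mirrors the concurrent.futures.Future API, but the result may be assigned
# from a forked worker process, as in the test above. The helper name is
# illustrative; it assumes the fork start method used throughout this file.
def _example_mpfuture_round_trip():
    future = hivemind.MPFuture()

    def _worker(future):
        future.set_result("done")

    p = mp.Process(target=_worker, args=(future,))
    p.start()
    assert future.result(timeout=5.0) == "done"  # only the creator process may call result()
    p.join()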


@pytest.mark.forked
def test_mpfuture_exception():
    future = hivemind.MPFuture()
    with pytest.raises(concurrent.futures.TimeoutError):
        future.exception(timeout=1e-3)

    def _proc(future):
        future.set_exception(NotImplementedError())

    p = mp.Process(target=_proc, args=(future,))
    p.start()
    p.join()

    assert isinstance(future.exception(), NotImplementedError)
    with pytest.raises(NotImplementedError):
        future.result()
    assert future.cancel() is False
    assert future.done() and not future.running() and not future.cancelled()


@pytest.mark.forked
def test_mpfuture_cancel():
    future = hivemind.MPFuture()
    assert not future.cancelled()
    future.cancel()
    evt = mp.Event()

    def _proc():
        with pytest.raises(concurrent.futures.CancelledError):
            future.result()
        with pytest.raises(concurrent.futures.CancelledError):
            future.exception()
        with pytest.raises(InvalidStateError):
            future.set_result(123)
        with pytest.raises(InvalidStateError):
            future.set_exception(NotImplementedError())
        assert future.cancelled() and future.done() and not future.running()
        evt.set()

    p = mp.Process(target=_proc)
    p.start()
    p.join()
    assert evt.is_set()


@pytest.mark.forked
def test_mpfuture_status():
    evt = mp.Event()
    future = hivemind.MPFuture()

    def _proc1(future):
        assert future.set_running_or_notify_cancel() is True
        evt.set()

    p = mp.Process(target=_proc1, args=(future,))
    p.start()
    p.join()
    assert evt.is_set()
    evt.clear()

    assert future.running() and not future.done() and not future.cancelled()
    with pytest.raises(InvalidStateError):
        future.set_running_or_notify_cancel()

    future = hivemind.MPFuture()
    assert future.cancel()

    def _proc2(future):
        assert not future.running() and future.done() and future.cancelled()
        assert future.set_running_or_notify_cancel() is False
        evt.set()

    p = mp.Process(target=_proc2, args=(future,))
    p.start()
    p.join()
    assert evt.is_set()  # verify that _proc2 reached the end of its checks

    future2 = hivemind.MPFuture()
    future2.cancel()
    assert future2.set_running_or_notify_cancel() is False


@pytest.mark.asyncio
async def test_await_mpfuture():
    # await result from the same process, but a different coroutine
    f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()

    async def wait_and_assign_async():
        assert f2.set_running_or_notify_cancel() is True
        await asyncio.sleep(0.1)
        f1.set_result((123, "ololo"))
        f2.set_result((456, "pyshpysh"))

    asyncio.create_task(wait_and_assign_async())
    assert (await asyncio.gather(f1, f2)) == [(123, "ololo"), (456, "pyshpysh")]

    # await results from separate processes
    f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()

    def wait_and_assign(future, value):
        time.sleep(0.1 * random.random())
        future.set_result(value)

    p1 = mp.Process(target=wait_and_assign, args=(f1, "abc"))
    p2 = mp.Process(target=wait_and_assign, args=(f2, "def"))
    for p in p1, p2:
        p.start()

    assert (await asyncio.gather(f1, f2)) == ["abc", "def"]
    for p in p1, p2:
        p.join()

    # await a future that is cancelled from another process
    f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()

    def wait_and_cancel():
        time.sleep(0.01)
        f2.set_result(123456)
        time.sleep(0.1)
        f1.cancel()

    p = mp.Process(target=wait_and_cancel)
    p.start()
    with pytest.raises(asyncio.CancelledError):
        # note: it is intended that awaiting a cancelled MPFuture raises CancelledError
        await asyncio.gather(f1, f2)
    p.join()

    # await a future whose exception is set from another process
    f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()

    def wait_and_raise():
        time.sleep(0.01)
        f2.set_result(123456)
        time.sleep(0.1)
        f1.set_exception(ValueError("we messed up"))

    p = mp.Process(target=wait_and_raise)
    p.start()
    with pytest.raises(ValueError):
        # note: it is intended that awaiting MPFuture re-raises the exception set in the other process
        await asyncio.gather(f1, f2)
    p.join()
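

# Sketch: since MPFuture is awaitable, it composes with ordinary asyncio tools
# beyond asyncio.gather; this variant of the "separate processes" case above
# adds a timeout guard. Illustrative only, assuming the fork start method.
async def _example_await_with_timeout():
    future = hivemind.MPFuture()

    def _assign():
        time.sleep(0.05)
        future.set_result(42)

    p = mp.Process(target=_assign)
    p.start()
    assert await asyncio.wait_for(future, timeout=5.0) == 42
    p.join()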


@pytest.mark.forked
def test_mpfuture_bidirectional():
    evt = mp.Event()
    future_from_main = hivemind.MPFuture()

    def _future_creator():
        future_from_fork = hivemind.MPFuture()
        future_from_main.set_result(("abc", future_from_fork))

        if future_from_fork.result() == ["we", "need", "to", "go", "deeper"]:
            evt.set()

    p = mp.Process(target=_future_creator)
    p.start()

    out = future_from_main.result()
    assert isinstance(out[1], hivemind.MPFuture)
    out[1].set_result(["we", "need", "to", "go", "deeper"])

    p.join()
    assert evt.is_set()


@pytest.mark.forked
def test_mpfuture_done_callback():
    receiver, sender = mp.Pipe(duplex=False)
    events = [mp.Event() for _ in range(6)]

    def _future_creator():
        future1, future2, future3 = hivemind.MPFuture(), hivemind.MPFuture(), hivemind.MPFuture()

        def _check_result_and_set(future):
            assert future.done()
            assert future.result() == 123
            events[0].set()

        future1.add_done_callback(_check_result_and_set)
        future1.add_done_callback(lambda future: events[1].set())
        future2.add_done_callback(lambda future: events[2].set())
        future3.add_done_callback(lambda future: events[3].set())

        sender.send((future1, future2))
        future2.cancel()  # trigger future2 callback from the same process

        events[0].wait()
        future1.add_done_callback(
            lambda future: events[4].set()
        )  # schedule a callback after future1 is already finished

        events[5].wait()

    p = mp.Process(target=_future_creator)
    p.start()

    future1, future2 = receiver.recv()
    future1.set_result(123)
    with pytest.raises(RuntimeError):
        future1.add_done_callback(lambda future: (1, 2, 3))

    assert future1.done() and not future1.cancelled()
    assert future2.done() and future2.cancelled()
    for i in 0, 1, 4:
        events[i].wait(1)
    assert events[0].is_set() and events[1].is_set() and events[2].is_set() and events[4].is_set()
    assert not events[3].is_set()

    events[5].set()
    p.join()
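

# Note on the test above: done callbacks fire in the process that registered
# them, which is why events[0], events[1], and events[4] (all registered in the
# child) are set even though future1's result is assigned in the parent, and
# why attaching a new callback from the receiving parent process raises
# RuntimeError, as asserted via pytest.raises.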


@pytest.mark.forked
def test_many_futures():
    evt = mp.Event()
    receiver, sender = mp.Pipe()
    main_futures = [hivemind.MPFuture() for _ in range(1000)]
    assert len(hivemind.MPFuture._active_futures) == 1000

    def _run_peer():
        fork_futures = [hivemind.MPFuture() for _ in range(500)]
        assert len(hivemind.MPFuture._active_futures) == 500

        for i, future in enumerate(random.sample(main_futures, 300)):
            if random.random() < 0.5:
                future.set_result(i)
            else:
                future.set_exception(ValueError(f"{i}"))

        sender.send(fork_futures[:-100])
        for future in fork_futures[-100:]:
            future.cancel()

        evt.wait()

        assert len(hivemind.MPFuture._active_futures) == 200
        for future in fork_futures:
            if not future.done():
                future.set_result(123)
        assert len(hivemind.MPFuture._active_futures) == 0

    p = mp.Process(target=_run_peer)
    p.start()

    some_fork_futures = receiver.recv()
    assert len(hivemind.MPFuture._active_futures) == 700

    for future in some_fork_futures:
        future.set_running_or_notify_cancel()
    for future in random.sample(some_fork_futures, 200):
        future.set_result(321)

    evt.set()

    for future in main_futures:
        future.cancel()
    assert len(hivemind.MPFuture._active_futures) == 0
    p.join()


def test_tensor_compression(size=(128, 128, 64), alpha=5e-08, beta=0.0008):
    torch.manual_seed(0)
    X = torch.randn(*size)
    assert torch.allclose(deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.NONE)), X)
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.MEANSTD_16BIT)) - X
    assert error.square().mean() < alpha
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.FLOAT16)) - X
    assert error.square().mean() < alpha
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.QUANTILE_8BIT)) - X
    assert error.square().mean() < beta
    error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.UNIFORM_8BIT)) - X
    assert error.square().mean() < beta

    zeros = torch.zeros(5, 5)
    for compression_type in CompressionType.values():
        assert deserialize_torch_tensor(serialize_torch_tensor(zeros, compression_type)).isfinite().all()
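

# A minimal usage sketch (not from the original suite): the round-trip pattern
# exercised above is serialize with a lossy codec, deserialize, and measure the
# mean squared error. The helper name and the 64x64 shape are illustrative.
def _example_compression_mse():
    tensor = torch.randn(64, 64)
    restored = deserialize_torch_tensor(serialize_torch_tensor(tensor, CompressionType.FLOAT16))
    return (restored - tensor).square().mean().item()  # small but nonzero for lossy codecs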


@pytest.mark.forked
@pytest.mark.asyncio
async def test_channel_cache():
    hivemind.ChannelCache.MAXIMUM_CHANNELS = 3
    hivemind.ChannelCache.EVICTION_PERIOD_SECONDS = 0.1

    c1 = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)
    c2 = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=True)
    c3 = hivemind.ChannelCache.get_stub("localhost:1338", DHTStub, aio=False)
    c3_again = hivemind.ChannelCache.get_stub("localhost:1338", DHTStub, aio=False)
    c1_again = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)
    c4 = hivemind.ChannelCache.get_stub("localhost:1339", DHTStub, aio=True)
    c2_anew = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=True)
    c1_yetagain = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)

    await asyncio.sleep(0.2)
    c1_anew = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=DHTStub)
    c1_anew_again = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=DHTStub)
    c1_otherstub = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=ConnectionHandlerStub)
    await asyncio.sleep(0.05)
    c1_otherstub_again = hivemind.ChannelCache.get_stub(
        target="localhost:1337", aio=False, stub_type=ConnectionHandlerStub
    )

    all_channels = [c1, c2, c3, c4, c3_again, c1_again, c2_anew, c1_yetagain, c1_anew, c1_anew_again, c1_otherstub]
    assert all(isinstance(c, DHTStub) for c in all_channels[:-1])
    assert isinstance(all_channels[-1], ConnectionHandlerStub)
    assert "aio" in repr(c2.rpc_find)
    assert "aio" not in repr(c1.rpc_find)

    duplicates = {
        (c1, c1_again),
        (c1, c1_yetagain),
        (c1_again, c1_yetagain),
        (c3, c3_again),
        (c1_anew, c1_anew_again),
        (c1_otherstub, c1_otherstub_again),
    }
    for i in range(len(all_channels)):
        for j in range(i + 1, len(all_channels)):
            ci, cj = all_channels[i], all_channels[j]
            assert (ci is cj) == ((ci, cj) in duplicates), (i, j)
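

# Note on the test above: as the duplicates set encodes, ChannelCache reuses a
# stub only for an identical (target, stub_type, aio) combination, and only
# until the eviction period (EVICTION_PERIOD_SECONDS) elapses; the
# asyncio.sleep(0.2) past that period is why c1_anew is a fresh stub, not c1.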


def test_serialize_tensor():
    tensor = torch.randn(512, 12288)
    serialized_tensor = serialize_torch_tensor(tensor, CompressionType.NONE)
    for chunk_size in [1024, 64 * 1024, 64 * 1024 + 1, 10 ** 9]:
        chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
        assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
        restored = hivemind.combine_from_streaming(chunks)
        assert torch.allclose(deserialize_torch_tensor(restored), tensor)

    chunk_size = 30 * 1024
    serialized_tensor = serialize_torch_tensor(tensor, CompressionType.FLOAT16)
    chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
    assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
    restored = hivemind.combine_from_streaming(chunks)
    assert torch.allclose(deserialize_torch_tensor(restored), tensor, rtol=0, atol=1e-2)

    tensor = torch.randint(0, 100, (512, 1, 1))
    serialized_tensor = serialize_torch_tensor(tensor, CompressionType.NONE)
    chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
    assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
    restored = hivemind.combine_from_streaming(chunks)
    assert torch.allclose(deserialize_torch_tensor(restored), tensor)

    scalar = torch.tensor(1.0)
    serialized_scalar = serialize_torch_tensor(scalar, CompressionType.NONE)
    assert torch.allclose(deserialize_torch_tensor(serialized_scalar), scalar)

    serialized_scalar = serialize_torch_tensor(scalar, CompressionType.FLOAT16)
    assert torch.allclose(deserialize_torch_tensor(serialized_scalar), scalar)
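

# The chunk-count identity asserted above is just integer ceiling division:
# for any buffer_len >= 1, (buffer_len - 1) // chunk_size + 1 equals
# ceil(buffer_len / chunk_size). A standalone sketch of that arithmetic:
def _expected_num_chunks(buffer_len: int, chunk_size: int) -> int:
    return (buffer_len - 1) // chunk_size + 1  # e.g. 65537 bytes in 64 KiB chunks -> 2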


def test_serialize_tuple():
    test_pairs = (
        ((1, 2, 3), [1, 2, 3]),
        (("1", False, 0), ["1", False, 0]),
        (("1", False, 0), ("1", 0, 0)),
        (("1", b"qq", (2, 5, "0")), ["1", b"qq", (2, 5, "0")]),
    )

    for first, second in test_pairs:
        assert MSGPackSerializer.loads(MSGPackSerializer.dumps(first)) == first
        assert MSGPackSerializer.loads(MSGPackSerializer.dumps(second)) == second
        assert MSGPackSerializer.dumps(first) != MSGPackSerializer.dumps(second)
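

# As the pairs above demonstrate, MSGPackSerializer round-trips preserve the
# distinction between tuples and lists (and between False and 0), so values
# that compare equal element-wise can still serialize to different bytes.
def _example_msgpack_tuple_vs_list():
    assert MSGPackSerializer.loads(MSGPackSerializer.dumps((1, 2))) == (1, 2)
    assert MSGPackSerializer.dumps((1, 2)) != MSGPackSerializer.dumps([1, 2])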


def test_split_parts():
    tensor = torch.randn(910, 512)
    serialized_tensor_part = serialize_torch_tensor(tensor, allow_inplace=False)
    chunks1 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 16384))
    assert len(chunks1) == int(np.ceil(tensor.numel() * tensor.element_size() / 16384))

    chunks2 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 10_000))
    assert len(chunks2) == int(np.ceil(tensor.numel() * tensor.element_size() / 10_000))

    chunks3 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 10 ** 9))
    assert len(chunks3) == 1

    compressed_tensor_part = serialize_torch_tensor(tensor, CompressionType.FLOAT16, allow_inplace=False)
    chunks4 = list(hivemind.utils.split_for_streaming(compressed_tensor_part, 16384))
    assert len(chunks4) == int(np.ceil(tensor.numel() * 2 / 16384))

    combined1 = hivemind.utils.combine_from_streaming(chunks1)
    combined2 = hivemind.utils.combine_from_streaming(iter(chunks2))
    combined3 = hivemind.utils.combine_from_streaming(chunks3)
    combined4 = hivemind.utils.combine_from_streaming(chunks4)

    for combined in combined1, combined2, combined3:
        assert torch.allclose(tensor, deserialize_torch_tensor(combined), rtol=1e-5, atol=1e-8)
    assert torch.allclose(tensor, deserialize_torch_tensor(combined4), rtol=1e-3, atol=1e-3)

    combined_incomplete = hivemind.utils.combine_from_streaming(chunks4[:5])
    combined_incomplete2 = hivemind.utils.combine_from_streaming(chunks4[:1])
    combined_incomplete3 = hivemind.utils.combine_from_streaming(chunks4[:-1])
    for combined in combined_incomplete, combined_incomplete2, combined_incomplete3:
        with pytest.raises(RuntimeError):
            deserialize_torch_tensor(combined)
            # note: we rely on this being RuntimeError in hivemind.averaging.allreduce.AllreduceRunner


def test_generic_data_classes():
    value_with_exp = ValueWithExpiration(value="string_value", expiration_time=DHTExpiration(10))
    assert value_with_exp.value == "string_value" and value_with_exp.expiration_time == DHTExpiration(10)

    heap_entry = HeapEntry(expiration_time=DHTExpiration(10), key="string_value")
    assert heap_entry.key == "string_value" and heap_entry.expiration_time == DHTExpiration(10)

    sorted_expirations = sorted([DHTExpiration(value) for value in range(1, 1000)])
    sorted_heap_entries = sorted([HeapEntry(DHTExpiration(value), key="any") for value in range(1, 1000)[::-1]])
    assert all([entry.expiration_time == value for entry, value in zip(sorted_heap_entries, sorted_expirations)])


@pytest.mark.asyncio
async def test_asyncio_utils():
    res = [i async for i, item in aenumerate(as_aiter("a", "b", "c"))]
    assert res == list(range(len(res)))

    num_steps = 0
    async for elem in amap_in_executor(lambda x: x ** 2, as_aiter(*range(100)), max_prefetch=5):
        assert elem == num_steps ** 2
        num_steps += 1
    assert num_steps == 100

    ours = [
        elem
        async for elem in amap_in_executor(max, as_aiter(*range(7)), as_aiter(*range(-50, 50, 10)), max_prefetch=1)
    ]
    ref = list(map(max, range(7), range(-50, 50, 10)))
    assert ours == ref

    ours = [row async for row in azip(as_aiter("a", "b", "c"), as_aiter(1, 2, 3))]
    ref = list(zip(["a", "b", "c"], [1, 2, 3]))
    assert ours == ref

    async def _aiterate():
        yield "foo"
        yield "bar"
        yield "baz"

    iterator = _aiterate()
    assert (await anext(iterator)) == "foo"
    tail = [item async for item in iterator]
    assert tail == ["bar", "baz"]
    with pytest.raises(StopAsyncIteration):
        await anext(iterator)

    assert [item async for item in achain(_aiterate(), as_aiter(*range(5)))] == ["foo", "bar", "baz"] + list(range(5))

    assert await asingle(as_aiter(1)) == 1
    with pytest.raises(ValueError):
        await asingle(as_aiter())
    with pytest.raises(ValueError):
        await asingle(as_aiter(1, 2, 3))

    assert await afirst(as_aiter(1)) == 1
    assert await afirst(as_aiter()) is None
    assert await afirst(as_aiter(), -1) == -1
    assert await afirst(as_aiter(1, 2, 3)) == 1

    async def iterate_with_delays(delays):
        for i, delay in enumerate(delays):
            await asyncio.sleep(delay)
            yield i

    async for _ in aiter_with_timeout(iterate_with_delays([0.1] * 5), timeout=0.2):
        pass

    sleepy_aiter = iterate_with_delays([0.1, 0.1, 0.3, 0.1, 0.1])
    num_steps = 0
    with pytest.raises(asyncio.TimeoutError):
        async for _ in aiter_with_timeout(sleepy_aiter, timeout=0.2):
            num_steps += 1
    assert num_steps == 2
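

# A combined usage sketch for the helpers above (illustrative only): stream
# items through as_aiter, square them in a background executor with
# amap_in_executor, and pair each result with its index via aenumerate.
async def _example_async_pipeline():
    squares = amap_in_executor(lambda x: x ** 2, as_aiter(1, 2, 3))
    return [pair async for pair in aenumerate(squares)]  # [(0, 1), (1, 4), (2, 9)]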


@pytest.mark.asyncio
async def test_cancel_and_wait():
    finished_gracefully = False

    async def coro_with_finalizer():
        nonlocal finished_gracefully
        try:
            await asyncio.Event().wait()
        except asyncio.CancelledError:
            await asyncio.sleep(0.05)
            finished_gracefully = True
            raise

    task = asyncio.create_task(coro_with_finalizer())
    await asyncio.sleep(0.05)
    assert await cancel_and_wait(task)
    assert finished_gracefully

    async def coro_with_result():
        return 777

    async def coro_with_error():
        raise ValueError("error")

    task_with_result = asyncio.create_task(coro_with_result())
    task_with_error = asyncio.create_task(coro_with_error())
    await asyncio.sleep(0.05)
    assert not await cancel_and_wait(task_with_result)
    assert not await cancel_and_wait(task_with_error)
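

# Sketch of the semantics asserted above: cancel_and_wait returns True only if
# the task was actually cancelled; tasks that already completed (with a result
# or an exception) are left as-is and yield False.
async def _example_cancel_and_wait():
    still_running = asyncio.create_task(asyncio.sleep(3600))
    assert await cancel_and_wait(still_running)  # pending task => cancelled, returns True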


@pytest.mark.parametrize("max_workers", [1, 2, 10])
def test_performance_ema_threadsafe(
    max_workers: int = 2,
    interval: float = 0.01,
    num_updates: int = 100,
    alpha: float = 0.05,
    bias_power: float = 0.7,
    tolerance: float = 0.05,
):
    def run_task(ema):
        task_size = random.randint(1, 4)
        with ema.update_threadsafe(task_size):
            time.sleep(task_size * interval * (0.9 + 0.2 * random.random()))
        return task_size

    with ThreadPoolExecutor(max_workers) as pool:
        ema = PerformanceEMA(alpha=alpha)
        start_time = time.perf_counter()
        futures = [pool.submit(run_task, ema) for _ in range(num_updates)]
        total_size = sum(future.result() for future in futures)
        end_time = time.perf_counter()

    target = total_size / (end_time - start_time)
    assert ema.samples_per_second >= (1 - tolerance) * target * max_workers ** (bias_power - 1)
    assert ema.samples_per_second <= (1 + tolerance) * target
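

# Single-threaded usage sketch for PerformanceEMA (names mirror the test; the
# numbers are illustrative): each update_threadsafe block reports task_size
# items processed over its wall-clock duration, and samples_per_second exposes
# the resulting smoothed throughput estimate.
def _example_performance_ema(num_tasks: int = 10):
    ema = PerformanceEMA(alpha=0.1)
    for _ in range(num_tasks):
        with ema.update_threadsafe(1):  # one sample per ~10 ms of work
            time.sleep(0.01)
    return ema.samples_per_second  # on the order of 100 under these assumptions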