# test_dht_node.py

import asyncio
import heapq
import multiprocessing as mp
import random
import signal
from itertools import product
from typing import List, Sequence, Tuple

import numpy as np
import pytest
from multiaddr import Multiaddr

import hivemind
from hivemind import get_dht_time
from hivemind.dht.node import DHTID, DHTNode
from hivemind.dht.protocol import DHTProtocol
from hivemind.dht.storage import DictionaryDHTValue
from hivemind.p2p import P2P, PeerID
from hivemind.utils.logging import get_logger

from test_utils.dht_swarms import launch_swarm_in_separate_processes, launch_star_shaped_swarm

logger = get_logger(__name__)
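

# each multiaddr is expected to carry a /p2p/<base58-peer-id> component; maddr["p2p"] extracts it,
# and the set comprehension deduplicates peers that expose several addresses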
def maddrs_to_peer_ids(maddrs: List[Multiaddr]) -> List[PeerID]:
    return list({PeerID.from_base58(maddr["p2p"]) for maddr in maddrs})


def run_protocol_listener(
    dhtid: DHTID, maddr_conn: mp.connection.Connection, initial_peers: Sequence[Multiaddr]
) -> None:
    loop = asyncio.get_event_loop()

    p2p = loop.run_until_complete(P2P.create(initial_peers=initial_peers))
    visible_maddrs = loop.run_until_complete(p2p.get_visible_maddrs())

    protocol = loop.run_until_complete(
        DHTProtocol.create(p2p, dhtid, bucket_size=20, depth_modulo=5, num_replicas=3, wait_timeout=5)
    )
    logger.info(f"Started peer id={protocol.node_id} visible_maddrs={visible_maddrs}")

    for peer_id in maddrs_to_peer_ids(initial_peers):
        loop.run_until_complete(protocol.call_ping(peer_id))

    maddr_conn.send((p2p.id, visible_maddrs))

    async def shutdown():
        await p2p.shutdown()
        logger.info(f"Finished peer id={protocol.node_id} maddrs={visible_maddrs}")
        loop.stop()

    loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(shutdown()))
    loop.run_forever()
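

# the child process reports its peer id and visible multiaddrs through a multiprocessing pipe;
# local_conn.recv() below blocks until the listener has fully started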
def launch_protocol_listener(
    initial_peers: Sequence[Multiaddr] = (),
) -> Tuple[DHTID, mp.Process, PeerID, List[Multiaddr]]:
    remote_conn, local_conn = mp.Pipe()
    dht_id = DHTID.generate()
    process = mp.Process(target=run_protocol_listener, args=(dht_id, remote_conn, initial_peers), daemon=True)
    process.start()
    peer_id, visible_maddrs = local_conn.recv()

    return dht_id, process, peer_id, visible_maddrs


# note: we run network-related tests in a separate process to re-initialize all global states from scratch
# this helps us avoid undesirable gRPC side-effects (e.g. segfaults) when running multiple tests in sequence
@pytest.mark.forked
def test_dht_protocol():
    peer1_node_id, peer1_proc, peer1_id, peer1_maddrs = launch_protocol_listener()
    peer2_node_id, peer2_proc, peer2_id, _ = launch_protocol_listener(initial_peers=peer1_maddrs)

    loop = asyncio.get_event_loop()
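
    # listen=False creates a client-mode peer that sends requests but does not accept inbound ones,
    # so it should not appear in other peers' routing tables; listen=True is a full participant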
    for listen in [False, True]:  # note: order matters, this test assumes that first run uses listen=False
        p2p = loop.run_until_complete(P2P.create(initial_peers=peer1_maddrs))
        protocol = loop.run_until_complete(
            DHTProtocol.create(
                p2p, DHTID.generate(), bucket_size=20, depth_modulo=5, wait_timeout=5, num_replicas=3, listen=listen
            )
        )
        logger.info(f"Self id={protocol.node_id}")

        assert loop.run_until_complete(protocol.call_ping(peer1_id)) == peer1_node_id

        key, value, expiration = DHTID.generate(), [random.random(), {"ololo": "pyshpysh"}], get_dht_time() + 1e3
        store_ok = loop.run_until_complete(
            protocol.call_store(peer1_id, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
        )
        assert all(store_ok), "DHT rejected a trivial store"

        # peer 1 must know about peer 2
        (recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
            protocol.call_find(peer1_id, [key])
        )[key]
        recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
        (recv_id, recv_peer_id) = next(iter(nodes_found.items()))
        assert (
            recv_id == peer2_node_id and recv_peer_id == peer2_id
        ), f"expected id={peer2_node_id}, peer={peer2_id} but got {recv_id}, {recv_peer_id}"
        assert recv_value == value and recv_expiration == expiration, (
            f"call_find_value expected {value} (expires by {expiration}) "
            f"but got {recv_value} (expires by {recv_expiration})"
        )

        # peer 2 must know about peer 1, but not have a *random* nonexistent value
        dummy_key = DHTID.generate()
        empty_item, nodes_found_2 = loop.run_until_complete(protocol.call_find(peer2_id, [dummy_key]))[dummy_key]
        assert empty_item is None, "Non-existent keys shouldn't have values"
        (recv_id, recv_peer_id) = next(iter(nodes_found_2.items()))
        assert (
            recv_id == peer1_node_id and recv_peer_id == peer1_id
        ), f"expected id={peer1_node_id}, peer={peer1_id} but got {recv_id}, {recv_peer_id}"

        # cause a non-response by querying a nonexistent peer
        assert loop.run_until_complete(protocol.call_find(PeerID.from_base58("fakeid"), [key])) is None

        # store/get a dictionary with sub-keys
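        # each sub-key carries its own (value, expiration) pair; judging by the asserts below,
        # the dictionary as a whole is expected to expire with its latest sub-key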
        nested_key, subkey1, subkey2 = DHTID.generate(), "foo", "bar"
        value1, value2 = [random.random(), {"ololo": "pyshpysh"}], "abacaba"
        assert loop.run_until_complete(
            protocol.call_store(
                peer1_id,
                keys=[nested_key],
                values=[hivemind.MSGPackSerializer.dumps(value1)],
                expiration_time=[expiration],
                subkeys=[subkey1],
            )
        )
        assert loop.run_until_complete(
            protocol.call_store(
                peer1_id,
                keys=[nested_key],
                values=[hivemind.MSGPackSerializer.dumps(value2)],
                expiration_time=[expiration + 5],
                subkeys=[subkey2],
            )
        )
        (recv_dict, recv_expiration), nodes_found = loop.run_until_complete(
            protocol.call_find(peer1_id, [nested_key])
        )[nested_key]
        assert isinstance(recv_dict, DictionaryDHTValue)
        assert len(recv_dict.data) == 2 and recv_expiration == expiration + 5
        assert recv_dict.data[subkey1] == (protocol.serializer.dumps(value1), expiration)
        assert recv_dict.data[subkey2] == (protocol.serializer.dumps(value2), expiration + 5)

        if listen:
            loop.run_until_complete(p2p.shutdown())

    peer1_proc.terminate()
    peer2_proc.terminate()


@pytest.mark.forked
def test_empty_table():
    """Test RPC methods with empty routing table"""
    peer_id, peer_proc, peer_peer_id, peer_maddrs = launch_protocol_listener()

    loop = asyncio.get_event_loop()
    p2p = loop.run_until_complete(P2P.create(initial_peers=peer_maddrs))
    protocol = loop.run_until_complete(
        DHTProtocol.create(
            p2p, DHTID.generate(), bucket_size=20, depth_modulo=5, wait_timeout=5, num_replicas=3, listen=False
        )
    )

    key, value, expiration = DHTID.generate(), [random.random(), {"ololo": "pyshpysh"}], get_dht_time() + 1e3

    empty_item, nodes_found = loop.run_until_complete(protocol.call_find(peer_peer_id, [key]))[key]
    assert empty_item is None and len(nodes_found) == 0
    assert all(
        loop.run_until_complete(
            protocol.call_store(peer_peer_id, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
        )
    ), "peer rejected store"

    (recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
        protocol.call_find(peer_peer_id, [key])
    )[key]
    recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
    assert len(nodes_found) == 0
    assert recv_value == value and recv_expiration == expiration

    assert loop.run_until_complete(protocol.call_ping(peer_peer_id)) == peer_id
    assert loop.run_until_complete(protocol.call_ping(PeerID.from_base58("fakeid"))) is None

    peer_proc.terminate()


@pytest.mark.forked
def test_dht_node():
    # step A: create a swarm of 50 dht nodes in separate processes
    # (first 5 created sequentially, others created in parallel)
    processes, dht, swarm_maddrs = launch_swarm_in_separate_processes(n_peers=50, n_sequential_peers=5)

    # step B: run the 51st node in this process
    loop = asyncio.get_event_loop()
    initial_peers = random.choice(swarm_maddrs)
    me = loop.run_until_complete(
        DHTNode.create(initial_peers=initial_peers, parallel_rpc=10, cache_refresh_before_expiry=False)
    )

    # test 1: find self
    nearest = loop.run_until_complete(me.find_nearest_nodes([me.node_id], k_nearest=1))[me.node_id]
    assert len(nearest) == 1 and nearest[me.node_id] == me.peer_id

    # test 2: find others
    for _ in range(10):
        ref_peer_id, query_id = random.choice(list(dht.items()))
        nearest = loop.run_until_complete(me.find_nearest_nodes([query_id], k_nearest=1))[query_id]
        assert len(nearest) == 1
        found_node_id, found_peer_id = next(iter(nearest.items()))
        assert found_node_id == query_id and found_peer_id == ref_peer_id

    # test 3: find neighbors to random nodes
    accuracy_numerator = accuracy_denominator = 0  # top-1 nearest neighbor accuracy
    jaccard_numerator = jaccard_denominator = 0  # jaccard similarity aka intersection over union
    all_node_ids = list(dht.values())

    for _ in range(10):
        query_id = DHTID.generate()
        k_nearest = random.randint(1, 10)
        exclude_self = random.random() > 0.5
        nearest = loop.run_until_complete(
            me.find_nearest_nodes([query_id], k_nearest=k_nearest, exclude_self=exclude_self)
        )[query_id]
        nearest_nodes = list(nearest)  # keys from ordered dict

        assert len(nearest_nodes) == k_nearest, "beam search must return exactly k_nearest results"
        assert me.node_id not in nearest_nodes or not exclude_self, "if exclude, results shouldn't contain self"
        assert np.all(np.diff(query_id.xor_distance(nearest_nodes)) >= 0), "results must be sorted by distance"
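
        # reference answer: exact k nearest ids over the full swarm; take k_nearest + 1 candidates
        # so that dropping our own id (when exclude_self is set) still leaves k_nearest of them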
        ref_nearest = heapq.nsmallest(k_nearest + 1, all_node_ids, key=query_id.xor_distance)
        if exclude_self and me.node_id in ref_nearest:
            ref_nearest.remove(me.node_id)
        if len(ref_nearest) > k_nearest:
            ref_nearest.pop()

        accuracy_numerator += nearest_nodes[0] == ref_nearest[0]
        accuracy_denominator += 1
        jaccard_numerator += len(set.intersection(set(nearest_nodes), set(ref_nearest)))
        jaccard_denominator += k_nearest

    accuracy = accuracy_numerator / accuracy_denominator
    logger.debug(f"Top-1 accuracy: {accuracy}")  # should be 98-100%
    jaccard_index = jaccard_numerator / jaccard_denominator
    logger.debug(f"Jaccard index (intersection over union): {jaccard_index}")  # should be 95-100%
    assert accuracy >= 0.9, f"Top-1 accuracy only {accuracy} ({accuracy_numerator} / {accuracy_denominator})"
    assert jaccard_index >= 0.9, f"Jaccard index only {jaccard_index} ({jaccard_numerator} / {jaccard_denominator})"

    # test 4: find all nodes
    dummy = DHTID.generate()
    nearest = loop.run_until_complete(me.find_nearest_nodes([dummy], k_nearest=len(dht) + 100))[dummy]
    assert len(nearest) == len(dht) + 1
    assert len(set.difference(set(nearest.keys()), set(all_node_ids) | {me.node_id})) == 0

    # test 5: node without peers
    detached_node = loop.run_until_complete(DHTNode.create())
    nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy]))[dummy]
    assert len(nearest) == 1 and nearest[detached_node.node_id] == detached_node.peer_id
    nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy], exclude_self=True))[dummy]
    assert len(nearest) == 0

    # test 6: store and get value
    true_time = get_dht_time() + 1200
    assert loop.run_until_complete(me.store("mykey", ["Value", 10], true_time))

    initial_peers = random.choice(swarm_maddrs)
    that_guy = loop.run_until_complete(
        DHTNode.create(
            initial_peers=initial_peers, parallel_rpc=10, cache_refresh_before_expiry=False, cache_locally=False
        )
    )
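
    # cache_locally=False should force that_guy to fetch "mykey" from the swarm rather than
    # answer from its own cache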
    for node in [me, that_guy]:
        val, expiration_time = loop.run_until_complete(node.get("mykey"))
        assert val == ["Value", 10], "Wrong value"
        assert expiration_time == true_time, "Wrong time"

    assert loop.run_until_complete(detached_node.get("mykey")) is None

    # test 7: bulk store and bulk get
    keys = "foo", "bar", "baz", "zzz"
    values = 3, 2, "batman", [1, 2, 3]
    store_ok = loop.run_until_complete(me.store_many(keys, values, expiration_time=get_dht_time() + 999))
    assert all(store_ok.values()), "failed to store one or more keys"
    response = loop.run_until_complete(me.get_many(keys[::-1]))
    for key, value in zip(keys, values):
        assert key in response and response[key][0] == value

    # test 8: store dictionaries as values (with sub-keys)
    upper_key, subkey1, subkey2, subkey3 = "ololo", "k1", "k2", "k3"
    now = get_dht_time()
    assert loop.run_until_complete(me.store(upper_key, subkey=subkey1, value=123, expiration_time=now + 10))
    assert loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=456, expiration_time=now + 20))
    for node in [that_guy, me]:
        value, time = loop.run_until_complete(node.get(upper_key))
        assert isinstance(value, dict) and time == now + 20
        assert value[subkey1] == (123, now + 10)
        assert value[subkey2] == (456, now + 20)
        assert len(value) == 2
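
    # an update with an earlier expiration than the currently stored one should be rejected,
    # while updates that extend the expiration should overwrite the sub-key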
    assert not loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=345, expiration_time=now + 10))
    assert loop.run_until_complete(me.store(upper_key, subkey=subkey2, value=567, expiration_time=now + 30))
    assert loop.run_until_complete(me.store(upper_key, subkey=subkey3, value=890, expiration_time=now + 50))
    loop.run_until_complete(asyncio.sleep(0.1))  # wait for cache to refresh

    for node in [that_guy, me]:
        value, time = loop.run_until_complete(node.get(upper_key))
        assert isinstance(value, dict) and time == now + 50, (value, time)
        assert value[subkey1] == (123, now + 10)
        assert value[subkey2] == (567, now + 30)
        assert value[subkey3] == (890, now + 50)
        assert len(value) == 3

    for proc in processes:
        proc.terminate()
    # The nodes don't own their hivemind.p2p.P2P instances, so we shut them down separately
    loop.run_until_complete(asyncio.wait([node.shutdown() for node in [me, detached_node, that_guy]]))


@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_replicas():
    num_replicas = random.randint(1, 20)
    peers = await launch_star_shaped_swarm(n_peers=20, num_replicas=num_replicas)

    you = random.choice(peers)
    assert await you.store("key1", "foo", get_dht_time() + 999)
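    # each key should land on exactly num_replicas peers, so the total number of storage entries
    # across the swarm equals the replica count of the key just stored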
    actual_key1_replicas = sum(len(peer.protocol.storage) for peer in peers)
    assert num_replicas == actual_key1_replicas

    assert await you.store("key2", "bar", get_dht_time() + 999)
    total_size = sum(len(peer.protocol.storage) for peer in peers)
    actual_key2_replicas = total_size - actual_key1_replicas
    assert num_replicas == actual_key2_replicas

    assert await you.store("key2", "baz", get_dht_time() + 1000)
    assert sum(len(peer.protocol.storage) for peer in peers) == total_size, "total size should not have changed"


@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_caching(T=0.05):
    node2 = await DHTNode.create(cache_refresh_before_expiry=5 * T, reuse_get_requests=False)
    node1 = await DHTNode.create(
        initial_peers=await node2.protocol.p2p.get_visible_maddrs(),
        cache_refresh_before_expiry=5 * T,
        listen=False,
        reuse_get_requests=False,
    )
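
    # T is the time unit of this test; with cache_refresh_before_expiry=5 * T, a cached entry becomes
    # eligible for a background refresh once it is within 5 * T of expiring; as the asserts below show,
    # an entry is queued for refresh only after it is requested a second time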
    await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 7 * T)
    await node2.store("k2", [654, "value"], expiration_time=hivemind.get_dht_time() + 7 * T)
    await node2.store("k3", [654, "value"], expiration_time=hivemind.get_dht_time() + 15 * T)
    await node1.get_many(["k", "k2", "k3", "k4"])
    assert len(node1.protocol.cache) == 3
    assert len(node1.cache_refresh_queue) == 0

    await node1.get_many(["k", "k2", "k3", "k4"])
    assert len(node1.cache_refresh_queue) == 3

    await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 12 * T)
    await asyncio.sleep(4 * T)
    await node1.get("k")
    await asyncio.sleep(1 * T)

    assert len(node1.protocol.cache) == 3
    assert len(node1.cache_refresh_queue) == 2
    await asyncio.sleep(3 * T)

    assert len(node1.cache_refresh_queue) == 1
    await asyncio.sleep(5 * T)
    assert len(node1.cache_refresh_queue) == 0
    await asyncio.sleep(5 * T)
    assert len(node1.cache_refresh_queue) == 0

    await node2.store("k", [123, "value"], expiration_time=hivemind.get_dht_time() + 10 * T)
    await node1.get("k")
    await asyncio.sleep(1 * T)
    assert len(node1.cache_refresh_queue) == 0
    await node1.get("k")
    await asyncio.sleep(1 * T)
    assert len(node1.cache_refresh_queue) == 1

    await asyncio.sleep(5 * T)
    assert len(node1.cache_refresh_queue) == 0

    await asyncio.gather(node1.shutdown(), node2.shutdown())


@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_reuse_get():
    peers = await launch_star_shaped_swarm(n_peers=10, parallel_rpc=256)

    await asyncio.gather(
        random.choice(peers).store("k1", 123, hivemind.get_dht_time() + 999),
        random.choice(peers).store("k2", 567, hivemind.get_dht_time() + 999),
    )

    you = random.choice(peers)
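
    # with request reuse enabled (the DHTNode default), concurrent gets for the same key share one
    # network request; DHTID.generate(key) hashes the key deterministically, so it matches the ids
    # that get_many uses internally to index pending_get_requests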
    futures1 = await you.get_many(["k1", "k2"], return_futures=True)
    assert len(you.pending_get_requests[DHTID.generate("k1")]) == 1
    assert len(you.pending_get_requests[DHTID.generate("k2")]) == 1

    futures2 = await you.get_many(["k2", "k3"], return_futures=True)
    assert len(you.pending_get_requests[DHTID.generate("k2")]) == 2

    await asyncio.gather(*futures1.values(), *futures2.values())
    futures3 = await you.get_many(["k3"], return_futures=True)
    assert len(you.pending_get_requests[DHTID.generate("k1")]) == 0
    assert len(you.pending_get_requests[DHTID.generate("k2")]) == 0
    assert len(you.pending_get_requests[DHTID.generate("k3")]) == 1

    assert (await futures1["k1"])[0] == 123
    assert await futures1["k2"] == await futures2["k2"] and (await futures1["k2"])[0] == 567
    assert await futures2["k3"] == await futures3["k3"] and (await futures3["k3"]) is None


@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_blacklist():
    node1, node2, node3, node4 = await launch_star_shaped_swarm(n_peers=4, blacklist_time=999)
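
    # blacklist_time=999 keeps unresponsive peers banned for the rest of the test: once node3 and
    # node4 are shut down, requests to them fail and they should land in the survivors' blacklists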
    assert await node2.store("abc", 123, expiration_time=hivemind.get_dht_time() + 99)
    assert len(node2.blacklist.ban_counter) == 0

    await asyncio.gather(node3.shutdown(), node4.shutdown())

    assert await node2.store("def", 456, expiration_time=hivemind.get_dht_time() + 99)
    assert set(node2.blacklist.ban_counter.keys()) == {node3.peer_id, node4.peer_id}

    assert await node1.get("abc", latest=True)  # force node1 to crawl dht and discover unresponsive peers
    assert node3.peer_id in node1.blacklist

    assert await node1.get("abc", latest=True)  # force node1 to crawl dht and discover unresponsive peers
    assert node2.peer_id not in node1.blacklist

    await asyncio.gather(node1.shutdown(), node2.shutdown())


@pytest.mark.forked
@pytest.mark.asyncio
async def test_dhtnode_edge_cases():
    peers = await launch_star_shaped_swarm(n_peers=4, parallel_rpc=4)

    subkeys = [0, "", False, True, "abyrvalg", 4555]
    keys = subkeys + [()]
    values = subkeys + [[]]
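
    # try every (key, subkey, value) combination, including falsy values such as 0, "" and False,
    # to make sure serialization and storage do not conflate them with missing entries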
    for key, subkey, value in product(keys, subkeys, values):
        await random.choice(peers).store(
            key=key, subkey=subkey, value=value, expiration_time=hivemind.get_dht_time() + 999
        )

        stored = await random.choice(peers).get(key=key, latest=True)
        assert stored is not None
        assert subkey in stored.value
        assert stored.value[subkey].value == value

    await asyncio.wait([node.shutdown() for node in peers])