test_p2p_daemon.py

import asyncio
import multiprocessing as mp
import os
import subprocess
import tempfile
from contextlib import closing
from functools import partial
from typing import List

import numpy as np
import pytest
from multiaddr import Multiaddr

from hivemind.p2p import P2P, P2PDaemonError, P2PHandlerError
from hivemind.proto import dht_pb2, test_pb2
from hivemind.utils.serializer import MSGPackSerializer

from test_utils.networking import get_free_port
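

# Checks whether the process with the given PID is still alive (used to verify daemon startup and shutdown)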
def is_process_running(pid: int) -> bool:
    return subprocess.run(["ps", "-p", str(pid)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode == 0
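

# Returns either the original P2P instance or a replica attached to the same daemon process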
async def replicate_if_needed(p2p: P2P, replicate: bool) -> P2P:
    return await P2P.replicate(p2p.daemon_listen_maddr) if replicate else p2p


@pytest.mark.asyncio
async def test_daemon_killed_on_del():
    p2p_daemon = await P2P.create()

    child_pid = p2p_daemon._child.pid
    assert is_process_running(child_pid)

    await p2p_daemon.shutdown()
    assert not is_process_running(child_pid)


@pytest.mark.asyncio
async def test_startup_error_message():
    with pytest.raises(P2PDaemonError, match=r"(?i)Failed to connect to bootstrap peers"):
        await P2P.create(
            initial_peers=[f"/ip4/127.0.0.1/tcp/{get_free_port()}/p2p/QmdaK4LUeQaKhqSFPRu9N7MvXUEWDxWwtCvPrS444tCgd1"]
        )

    with pytest.raises(P2PDaemonError, match=r"Daemon failed to start in .+ seconds"):
        await P2P.create(startup_timeout=0.01)  # Test that startup_timeout works


@pytest.mark.asyncio
async def test_identity():
    with tempfile.TemporaryDirectory() as tempdir:
        id1_path = os.path.join(tempdir, "id1")
        id2_path = os.path.join(tempdir, "id2")
        p2ps = await asyncio.gather(*[P2P.create(identity_path=path) for path in [None, None, id1_path, id2_path]])

        # We create the second daemon with id2 separately
        # to avoid a race condition while saving a newly generated identity
        p2ps.append(await P2P.create(identity_path=id2_path))

        # Using the same identity (if any) should lead to the same peer ID
        assert p2ps[-2].peer_id == p2ps[-1].peer_id

        # The rest of the peer IDs should be different
        peer_ids = {instance.peer_id for instance in p2ps}
        assert len(peer_ids) == 4

        for instance in p2ps:
            await instance.shutdown()

    with pytest.raises(FileNotFoundError, match=r"The directory.+does not exist"):
        P2P.generate_identity(id1_path)


@pytest.mark.parametrize(
    "host_maddrs",
    [
        [Multiaddr("/ip4/127.0.0.1/tcp/0")],
        [Multiaddr("/ip4/127.0.0.1/udp/0/quic")],
        [Multiaddr("/ip4/127.0.0.1/tcp/0"), Multiaddr("/ip4/127.0.0.1/udp/0/quic")],
    ],
)
@pytest.mark.asyncio
async def test_transports(host_maddrs: List[Multiaddr]):
    server = await P2P.create(host_maddrs=host_maddrs)
    peers = await server.list_peers()
    assert len(peers) == 0

    client = await P2P.create(host_maddrs=host_maddrs, initial_peers=await server.get_visible_maddrs())
    await client.wait_for_at_least_n_peers(1)

    peers = await client.list_peers()
    assert len({p.peer_id for p in peers}) == 1
    peers = await server.list_peers()
    assert len({p.peer_id for p in peers}) == 1


@pytest.mark.asyncio
async def test_daemon_replica_does_not_affect_primary():
    p2p_daemon = await P2P.create()
    p2p_replica = await P2P.replicate(p2p_daemon.daemon_listen_maddr)

    child_pid = p2p_daemon._child.pid
    assert is_process_running(child_pid)

    await p2p_replica.shutdown()
    assert is_process_running(child_pid)

    await p2p_daemon.shutdown()
    assert not is_process_running(child_pid)


@pytest.mark.asyncio
async def test_unary_handler_edge_cases():
    p2p = await P2P.create()
    p2p_replica = await P2P.replicate(p2p.daemon_listen_maddr)

    async def square_handler(data: test_pb2.TestRequest, context):
        return test_pb2.TestResponse(number=data.number**2)

    await p2p.add_protobuf_handler("square", square_handler, test_pb2.TestRequest)

    # try adding a duplicate handler
    with pytest.raises(P2PDaemonError):
        await p2p.add_protobuf_handler("square", square_handler, test_pb2.TestRequest)

    # try adding a duplicate handler from the replicated p2p
    with pytest.raises(P2PDaemonError):
        await p2p_replica.add_protobuf_handler("square", square_handler, test_pb2.TestRequest)

    # try dialing yourself
    with pytest.raises(P2PDaemonError):
        await p2p_replica.call_protobuf_handler(
            p2p.peer_id, "square", test_pb2.TestRequest(number=41), test_pb2.TestResponse
        )
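

# Exercises unary protobuf calls in four configurations: with and without cancelling
# the in-flight call, on the primary daemon and on a replica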
@pytest.mark.parametrize(
    "should_cancel,replicate",
    [
        (True, False),
        (True, True),
        (False, False),
        (False, True),
    ],
)
@pytest.mark.asyncio
async def test_call_protobuf_handler(should_cancel, replicate, handle_name="handle"):
    handler_cancelled = False
    server_primary = await P2P.create()
    server = await replicate_if_needed(server_primary, replicate)

    async def ping_handler(request, context):
        try:
            await asyncio.sleep(2)
        except asyncio.CancelledError:
            nonlocal handler_cancelled
            handler_cancelled = True
        return dht_pb2.PingResponse(peer=dht_pb2.NodeInfo(node_id=server.peer_id.to_bytes()), available=True)

    server_pid = server_primary._child.pid
    await server.add_protobuf_handler(handle_name, ping_handler, dht_pb2.PingRequest)
    assert is_process_running(server_pid)

    client_primary = await P2P.create(initial_peers=await server.get_visible_maddrs())
    client = await replicate_if_needed(client_primary, replicate)
    client_pid = client_primary._child.pid
    assert is_process_running(client_pid)
    await client.wait_for_at_least_n_peers(1)

    ping_request = dht_pb2.PingRequest(peer=dht_pb2.NodeInfo(node_id=client.peer_id.to_bytes()), validate=True)
    expected_response = dht_pb2.PingResponse(peer=dht_pb2.NodeInfo(node_id=server.peer_id.to_bytes()), available=True)

    if should_cancel:
        call_task = asyncio.create_task(
            client.call_protobuf_handler(server.peer_id, handle_name, ping_request, dht_pb2.PingResponse)
        )
        await asyncio.sleep(0.25)
        call_task.cancel()
        await asyncio.sleep(0.25)
        assert handler_cancelled
    else:
        actual_response = await client.call_protobuf_handler(
            server.peer_id, handle_name, ping_request, dht_pb2.PingResponse
        )
        assert actual_response == expected_response
        assert not handler_cancelled

    await server.shutdown()
    await server_primary.shutdown()
    assert not is_process_running(server_pid)

    await client_primary.shutdown()
    assert not is_process_running(client_pid)


@pytest.mark.asyncio
async def test_call_protobuf_handler_error(handle_name="handle"):
    async def error_handler(request, context):
        raise ValueError("boom")

    server = await P2P.create()
    server_pid = server._child.pid
    await server.add_protobuf_handler(handle_name, error_handler, dht_pb2.PingRequest)
    assert is_process_running(server_pid)

    client = await P2P.create(initial_peers=await server.get_visible_maddrs())
    client_pid = client._child.pid
    assert is_process_running(client_pid)
    await client.wait_for_at_least_n_peers(1)

    ping_request = dht_pb2.PingRequest(peer=dht_pb2.NodeInfo(node_id=client.peer_id.to_bytes()), validate=True)

    with pytest.raises(P2PHandlerError) as excinfo:
        await client.call_protobuf_handler(server.peer_id, handle_name, ping_request, dht_pb2.PingResponse)
    assert "boom" in str(excinfo.value)

    await server.shutdown()
    await client.shutdown()
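

# Binary stream helpers for the tests below: the server squares each MessagePack-encoded
# number it receives, and the client verifies the round trip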
async def handle_square_stream(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    with closing(writer):
        while True:
            try:
                x = MSGPackSerializer.loads(await P2P.receive_raw_data(reader))
            except asyncio.IncompleteReadError:
                break

            result = x**2
            await P2P.send_raw_data(MSGPackSerializer.dumps(result), writer)


async def validate_square_stream(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    with closing(writer):
        for _ in range(10):
            x = np.random.randint(100)

            await P2P.send_raw_data(MSGPackSerializer.dumps(x), writer)
            result = MSGPackSerializer.loads(await P2P.receive_raw_data(reader))

            assert result == x**2


@pytest.mark.asyncio
async def test_call_peer_single_process():
    server = await P2P.create()
    server_pid = server._child.pid
    assert is_process_running(server_pid)

    handler_name = "square"
    await server.add_binary_stream_handler(handler_name, handle_square_stream)

    client = await P2P.create(initial_peers=await server.get_visible_maddrs())
    client_pid = client._child.pid
    assert is_process_running(client_pid)

    await client.wait_for_at_least_n_peers(1)

    _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)
    await validate_square_stream(reader, writer)

    await server.shutdown()
    assert not is_process_running(server_pid)

    await client.shutdown()
    assert not is_process_running(client_pid)
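

# Runs a stream-handling server in a separate process: it reports its peer ID and visible
# maddrs over a pipe, then waits until the client signals that a response was received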
async def run_server(handler_name, server_side, response_received):
    server = await P2P.create()
    server_pid = server._child.pid
    assert is_process_running(server_pid)

    await server.add_binary_stream_handler(handler_name, handle_square_stream)

    server_side.send(server.peer_id)
    server_side.send(await server.get_visible_maddrs())
    while response_received.value == 0:
        await asyncio.sleep(0.5)

    await server.shutdown()
    assert not is_process_running(server_pid)


def server_target(handler_name, server_side, response_received):
    asyncio.run(run_server(handler_name, server_side, response_received))


@pytest.mark.asyncio
async def test_call_peer_different_processes():
    handler_name = "square"

    server_side, client_side = mp.Pipe()
    response_received = mp.Value(np.ctypeslib.as_ctypes_type(np.int32))
    response_received.value = 0

    proc = mp.Process(target=server_target, args=(handler_name, server_side, response_received))
    proc.start()

    peer_id = client_side.recv()
    peer_maddrs = client_side.recv()

    client = await P2P.create(initial_peers=peer_maddrs)
    client_pid = client._child.pid
    assert is_process_running(client_pid)

    await client.wait_for_at_least_n_peers(1)

    _, reader, writer = await client.call_binary_stream_handler(peer_id, handler_name)
    await validate_square_stream(reader, writer)

    response_received.value = 1

    await client.shutdown()
    assert not is_process_running(client_pid)

    proc.join()
    assert proc.exitcode == 0
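

# An exception inside a stream handler should close that connection only:
# the daemon stays alive and keeps serving new streams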
@pytest.mark.asyncio
async def test_error_closes_connection():
    async def handle_raising_error(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        with closing(writer):
            command = await P2P.receive_raw_data(reader)
            if command == b"raise_error":
                raise Exception("The handler has failed")
            else:
                await P2P.send_raw_data(b"okay", writer)

    server = await P2P.create()
    server_pid = server._child.pid
    assert is_process_running(server_pid)

    handler_name = "handler"
    await server.add_binary_stream_handler(handler_name, handle_raising_error)

    client = await P2P.create(initial_peers=await server.get_visible_maddrs())
    client_pid = client._child.pid
    assert is_process_running(client_pid)

    await client.wait_for_at_least_n_peers(1)

    _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)
    with closing(writer):
        await P2P.send_raw_data(b"raise_error", writer)
        with pytest.raises(asyncio.IncompleteReadError):  # Means that the connection is closed
            await P2P.receive_raw_data(reader)

    # Although the handler raised an exception, the server did not crash and is ready for the next requests
    assert is_process_running(server_pid)

    _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)
    with closing(writer):
        await P2P.send_raw_data(b"behave_normally", writer)
        assert await P2P.receive_raw_data(reader) == b"okay"

    await server.shutdown()
    assert not is_process_running(server_pid)

    await client.shutdown()
    assert not is_process_running(client_pid)
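

# Handlers added through replicas are served under the primary's peer ID
# and stop being served once the replica that registered them shuts down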
@pytest.mark.asyncio
async def test_handlers_on_different_replicas():
    async def handler(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, key: bytes) -> None:
        with closing(writer):
            await P2P.send_raw_data(key, writer)

    server_primary = await P2P.create()
    server_id = server_primary.peer_id
    await server_primary.add_binary_stream_handler("handle_primary", partial(handler, key=b"primary"))

    server_replica1 = await replicate_if_needed(server_primary, True)
    await server_replica1.add_binary_stream_handler("handle1", partial(handler, key=b"replica1"))

    server_replica2 = await replicate_if_needed(server_primary, True)
    await server_replica2.add_binary_stream_handler("handle2", partial(handler, key=b"replica2"))

    client = await P2P.create(initial_peers=await server_primary.get_visible_maddrs())
    await client.wait_for_at_least_n_peers(1)

    for name, expected_key in [("handle_primary", b"primary"), ("handle1", b"replica1"), ("handle2", b"replica2")]:
        _, reader, writer = await client.call_binary_stream_handler(server_id, name)
        with closing(writer):
            assert await P2P.receive_raw_data(reader) == expected_key

    await server_replica1.shutdown()
    await server_replica2.shutdown()

    # The primary no longer handles the replicas' protocols after their shutdown
    for name in ["handle1", "handle2"]:
        _, reader, writer = await client.call_binary_stream_handler(server_id, name)
        with pytest.raises(asyncio.IncompleteReadError), closing(writer):
            await P2P.receive_raw_data(reader)

    await server_primary.shutdown()
    await client.shutdown()