@@ -16,37 +16,43 @@ from hivemind.dht.protocol import DHTProtocol, ValidationError
 from hivemind.dht.storage import DictionaryDHTValue
 
 
-def run_protocol_listener(port: int, dhtid: DHTID, started: mp.synchronize.Event, ping: Optional[Endpoint] = None):
+def run_protocol_listener(port: int, dhtid: DHTID, pipe_side: mp.connection.Connection, ping: Optional[Endpoint] = None):
     loop = asyncio.get_event_loop()
     protocol = loop.run_until_complete(DHTProtocol.create(
         dhtid, bucket_size=20, depth_modulo=5, num_replicas=3, wait_timeout=5, listen_on=f"{LOCALHOST}:{port}"))
 
-    assert protocol.port == port
+    port = protocol.port
     print(f"Started peer id={protocol.node_id} port={port}", flush=True)
 
     if ping is not None:
         loop.run_until_complete(protocol.call_ping(ping))
-    started.set()
+
+    pipe_side.send((protocol.port, protocol.server.endpoint))
+
     loop.run_until_complete(protocol.server.wait_for_termination())
     print(f"Finished peer id={protocol.node_id} port={port}", flush=True)
 
 
-# note: we run grpc-related tests in a separate process to re-initialize all global states from scratch
-# this helps us avoid undesirable side-effects (e.g. segfaults) when running multiple tests in sequence
+# note: we run network-related tests in a separate process to re-initialize all global states from scratch
+# this helps us avoid undesirable side-effects (e.g. segfaults) when running multiple tests in a sequence
 
 
 @pytest.mark.forked
 def test_dht_protocol():
     # create the first peer
-    peer1_port, peer1_id, peer1_started = hivemind.find_open_port(), DHTID.generate(), mp.Event()
-    peer1_proc = mp.Process(target=run_protocol_listener, args=(peer1_port, peer1_id, peer1_started), daemon=True)
-    peer1_proc.start(), peer1_started.wait()
+    first_side, ours_side = mp.Pipe()
+    peer1_port, peer1_id = hivemind.find_open_port(), DHTID.generate()
+    peer1_proc = mp.Process(target=run_protocol_listener, args=(peer1_port, peer1_id, first_side), daemon=True)
+    peer1_proc.start()
+    peer1_port, peer1_endpoint = ours_side.recv()
 
     # create another peer that connects to the first peer
-    peer2_port, peer2_id, peer2_started = hivemind.find_open_port(), DHTID.generate(), mp.Event()
-    peer2_proc = mp.Process(target=run_protocol_listener, args=(peer2_port, peer2_id, peer2_started),
-                            kwargs={'ping': f'{LOCALHOST}:{peer1_port}'}, daemon=True)
-    peer2_proc.start(), peer2_started.wait()
+    second_side, ours_side = mp.Pipe()
+    peer2_port, peer2_id = hivemind.find_open_port(), DHTID.generate()
+    peer2_proc = mp.Process(target=run_protocol_listener, args=(peer2_port, peer2_id, second_side),
+                            kwargs={'ping': peer1_endpoint}, daemon=True)
+    peer2_proc.start()
+    peer2_port, peer2_endpoint = ours_side.recv()
 
     loop = asyncio.get_event_loop()
     for listen in [False, True]:  # note: order matters, this test assumes that first run uses listen=False
@@ -54,21 +60,21 @@ def test_dht_protocol():
             DHTID.generate(), bucket_size=20, depth_modulo=5, wait_timeout=5, num_replicas=3, listen=listen))
         print(f"Self id={protocol.node_id}", flush=True)
 
-        assert loop.run_until_complete(protocol.call_ping(f'{LOCALHOST}:{peer1_port}')) == peer1_id
+        assert loop.run_until_complete(protocol.call_ping(peer1_endpoint)) == peer1_id
 
         key, value, expiration = DHTID.generate(), [random.random(), {'ololo': 'pyshpysh'}], get_dht_time() + 1e3
         store_ok = loop.run_until_complete(protocol.call_store(
-            f'{LOCALHOST}:{peer1_port}', [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
+            peer1_endpoint, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
         )
         assert all(store_ok), "DHT rejected a trivial store"
 
         # peer 1 must know about peer 2
         (recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
-            protocol.call_find(f'{LOCALHOST}:{peer1_port}', [key]))[key]
+            protocol.call_find(peer1_endpoint, [key]))[key]
         recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
         (recv_id, recv_endpoint) = next(iter(nodes_found.items()))
-        assert recv_id == peer2_id and ':'.join(recv_endpoint.split(':')[-2:]) == f"{LOCALHOST}:{peer2_port}", \
-            f"expected id={peer2_id}, peer={LOCALHOST}:{peer2_port} but got {recv_id}, {recv_endpoint}"
+        assert recv_id == peer2_id and recv_endpoint == peer2_endpoint, \
+            f"expected id={peer2_id}, peer={peer2_endpoint} but got {recv_id}, {recv_endpoint}"
 
         assert recv_value == value and recv_expiration == expiration, \
             f"call_find_value expected {value} (expires by {expiration}) " \
@@ -77,11 +83,11 @@ def test_dht_protocol():
         # peer 2 must know about peer 1, but not have a *random* nonexistent value
         dummy_key = DHTID.generate()
         empty_item, nodes_found_2 = loop.run_until_complete(
-            protocol.call_find(f'{LOCALHOST}:{peer2_port}', [dummy_key]))[dummy_key]
+            protocol.call_find(peer2_endpoint, [dummy_key]))[dummy_key]
         assert empty_item is None, "Non-existent keys shouldn't have values"
         (recv_id, recv_endpoint) = next(iter(nodes_found_2.items()))
-        assert recv_id == peer1_id and recv_endpoint == f"{LOCALHOST}:{peer1_port}", \
-            f"expected id={peer1_id}, peer={LOCALHOST}:{peer1_port} but got {recv_id}, {recv_endpoint}"
+        assert recv_id == peer1_id and recv_endpoint == peer1_endpoint, \
+            f"expected id={peer1_id}, peer={peer1_endpoint} but got {recv_id}, {recv_endpoint}"
 
         # cause a non-response by querying a nonexistent peer
         dummy_port = hivemind.find_open_port()
@@ -91,35 +97,38 @@ def test_dht_protocol():
         nested_key, subkey1, subkey2 = DHTID.generate(), 'foo', 'bar'
         value1, value2 = [random.random(), {'ololo': 'pyshpysh'}], 'abacaba'
         assert loop.run_until_complete(protocol.call_store(
-            f'{LOCALHOST}:{peer1_port}', keys=[nested_key], values=[hivemind.MSGPackSerializer.dumps(value1)],
+            peer1_endpoint, keys=[nested_key], values=[hivemind.MSGPackSerializer.dumps(value1)],
             expiration_time=[expiration], subkeys=[subkey1])
         )
         assert loop.run_until_complete(protocol.call_store(
-            f'{LOCALHOST}:{peer1_port}', keys=[nested_key], values=[hivemind.MSGPackSerializer.dumps(value2)],
+            peer1_endpoint, keys=[nested_key], values=[hivemind.MSGPackSerializer.dumps(value2)],
             expiration_time=[expiration + 5], subkeys=[subkey2])
         )
         (recv_dict, recv_expiration), nodes_found = loop.run_until_complete(
-            protocol.call_find(f'{LOCALHOST}:{peer1_port}', [nested_key]))[nested_key]
+            protocol.call_find(peer1_endpoint, [nested_key]))[nested_key]
         assert isinstance(recv_dict, DictionaryDHTValue)
         assert len(recv_dict.data) == 2 and recv_expiration == expiration + 5
         assert recv_dict.data[subkey1] == (protocol.serializer.dumps(value1), expiration)
         assert recv_dict.data[subkey2] == (protocol.serializer.dumps(value2), expiration + 5)
 
-        assert LOCALHOST in loop.run_until_complete(protocol.get_outgoing_request_endpoint(f'{LOCALHOST}:{peer1_port}'))
+        assert protocol.client.endpoint == loop.run_until_complete(protocol.get_outgoing_request_endpoint(peer1_endpoint))
 
         if listen:
             loop.run_until_complete(protocol.shutdown())
 
     peer1_proc.terminate()
     peer2_proc.terminate()
+    protocol.__del__() #TODO
 
 
 @pytest.mark.forked
 def test_empty_table():
     """ Test RPC methods with empty routing table """
-    peer_port, peer_id, peer_started = hivemind.find_open_port(), DHTID.generate(), mp.Event()
-    peer_proc = mp.Process(target=run_protocol_listener, args=(peer_port, peer_id, peer_started), daemon=True)
-    peer_proc.start(), peer_started.wait()
+    theirs_side, ours_side = mp.Pipe()
+    peer_port, peer_id = hivemind.find_open_port(), DHTID.generate()
+    peer_proc = mp.Process(target=run_protocol_listener, args=(peer_port, peer_id, theirs_side), daemon=True)
+    peer_proc.start()
+    peer_port, peer_endpoint = ours_side.recv()
 
     loop = asyncio.get_event_loop()
     protocol = loop.run_until_complete(DHTProtocol.create(
@@ -128,21 +137,22 @@ def test_empty_table():
     key, value, expiration = DHTID.generate(), [random.random(), {'ololo': 'pyshpysh'}], get_dht_time() + 1e3
 
     empty_item, nodes_found = loop.run_until_complete(
-        protocol.call_find(f'{LOCALHOST}:{peer_port}', [key]))[key]
+        protocol.call_find(peer_endpoint, [key]))[key]
     assert empty_item is None and len(nodes_found) == 0
     assert all(loop.run_until_complete(protocol.call_store(
-        f'{LOCALHOST}:{peer_port}', [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
+        peer_endpoint, [key], [hivemind.MSGPackSerializer.dumps(value)], expiration)
     )), "peer rejected store"
 
     (recv_value_bytes, recv_expiration), nodes_found = loop.run_until_complete(
-        protocol.call_find(f'{LOCALHOST}:{peer_port}', [key]))[key]
+        protocol.call_find(peer_endpoint, [key]))[key]
     recv_value = hivemind.MSGPackSerializer.loads(recv_value_bytes)
     assert len(nodes_found) == 0
    assert recv_value == value and recv_expiration == expiration
 
-    assert loop.run_until_complete(protocol.call_ping(f'{LOCALHOST}:{peer_port}')) == peer_id
+    assert loop.run_until_complete(protocol.call_ping(peer_endpoint)) == peer_id
     assert loop.run_until_complete(protocol.call_ping(f'{LOCALHOST}:{hivemind.find_open_port()}')) is None
     peer_proc.terminate()
+    protocol.__del__() #TODO
 
 
 def run_node(node_id, peers, status_pipe: mp.Pipe):
@@ -151,9 +161,8 @@ def run_node(node_id, peers, status_pipe: mp.Pipe):
     asyncio.set_event_loop(asyncio.new_event_loop())
     loop = asyncio.get_event_loop()
     node = loop.run_until_complete(DHTNode.create(node_id, initial_peers=peers))
-    status_pipe.send(node.port)
-    while True:
-        loop.run_forever()
+    status_pipe.send((node.port, node.endpoint))
+    loop.run_until_complete(node.protocol.server.wait_for_termination())
 
 
 @pytest.mark.forked
@@ -168,17 +177,17 @@ def test_dht_node():
         pipe_recv, pipe_send = mp.Pipe(duplex=False)
         proc = mp.Process(target=run_node, args=(node_id, peers, pipe_send), daemon=True)
         proc.start()
-        port = pipe_recv.recv()
+        port, endpoint = pipe_recv.recv()
         processes.append(proc)
-        dht[f"{LOCALHOST}:{port}"] = node_id
+        dht[endpoint] = node_id
 
     loop = asyncio.get_event_loop()
-    me = loop.run_until_complete(DHTNode.create(initial_peers=random.sample(dht.keys(), 5), parallel_rpc=10,
+    me = loop.run_until_complete(DHTNode.create(initial_peers=random.sample(dht.keys(), min(len(dht), 5)), parallel_rpc=2,
                                                 cache_refresh_before_expiry=False))
 
     # test 1: find self
     nearest = loop.run_until_complete(me.find_nearest_nodes([me.node_id], k_nearest=1))[me.node_id]
-    assert len(nearest) == 1 and ':'.join(nearest[me.node_id].split(':')[-2:]) == f"{LOCALHOST}:{me.port}"
+    assert len(nearest) == 1 and nearest[me.node_id] == me.endpoint
 
     # test 2: find others
     for i in range(10):
@@ -186,7 +195,7 @@ def test_dht_node():
         nearest = loop.run_until_complete(me.find_nearest_nodes([query_id], k_nearest=1))[query_id]
         assert len(nearest) == 1
         found_node_id, found_endpoint = next(iter(nearest.items()))
-        assert found_node_id == query_id and ':'.join(found_endpoint.split(':')[-2:]) == ref_endpoint
+        assert found_node_id == query_id and found_endpoint == ref_endpoint
 
     # test 3: find neighbors to random nodes
     accuracy_numerator = accuracy_denominator = 0  # top-1 nearest neighbor accuracy
@@ -195,7 +204,7 @@ def test_dht_node():
 
     for i in range(10):
         query_id = DHTID.generate()
-        k_nearest = random.randint(1, 10)
+        k_nearest = random.randint(1, len(dht))
         exclude_self = random.random() > 0.5
         nearest = loop.run_until_complete(
             me.find_nearest_nodes([query_id], k_nearest=k_nearest, exclude_self=exclude_self))[query_id]
@@ -233,7 +242,7 @@ def test_dht_node():
     # test 5: node without peers
     detached_node = loop.run_until_complete(DHTNode.create())
     nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy]))[dummy]
-    assert len(nearest) == 1 and nearest[detached_node.node_id] == f"{LOCALHOST}:{detached_node.port}"
+    assert len(nearest) == 1 and nearest[detached_node.node_id] == detached_node.endpoint
     nearest = loop.run_until_complete(detached_node.find_nearest_nodes([dummy], exclude_self=True))[dummy]
     assert len(nearest) == 0
 
@@ -286,6 +295,9 @@ def test_dht_node():
 
     for proc in processes:
         proc.terminate()
+    me.__del__()#TODO
+    detached_node.__del__()
+    that_guy.__del__()
 
 
 @pytest.mark.forked
@@ -314,12 +326,15 @@ async def test_dhtnode_replicas():
     assert await you.store('key2', 'baz', get_dht_time() + 1000)
     assert sum(len(peer.protocol.storage) for peer in peers) == total_size, "total size should not have changed"
 
+    for p in peers:
+        p.__del__()#TODO
+
 
 @pytest.mark.forked
 @pytest.mark.asyncio
 async def test_dhtnode_caching(T=0.05):
     node2 = await hivemind.DHTNode.create(cache_refresh_before_expiry=5 * T, reuse_get_requests=False)
-    node1 = await hivemind.DHTNode.create(initial_peers=[f'localhost:{node2.port}'],
+    node1 = await hivemind.DHTNode.create(initial_peers=[node2.endpoint],
                                           cache_refresh_before_expiry=5 * T, listen=False, reuse_get_requests=False)
     await node2.store('k', [123, 'value'], expiration_time=hivemind.get_dht_time() + 7 * T)
     await node2.store('k2', [654, 'value'], expiration_time=hivemind.get_dht_time() + 7 * T)
@@ -359,15 +374,17 @@ async def test_dhtnode_caching(T=0.05):
     assert len(node1.cache_refresh_queue) == 0
 
     await asyncio.gather(node1.shutdown(), node2.shutdown())
+    node1.__del__()#TODO
+    node2.__del__()
 
 
 @pytest.mark.forked
 @pytest.mark.asyncio
 async def test_dhtnode_reuse_get():
     peers = []
-    for i in range(10):
-        neighbors_i = [f'{LOCALHOST}:{node.port}' for node in random.sample(peers, min(3, len(peers)))]
-        peers.append(await hivemind.DHTNode.create(initial_peers=neighbors_i, parallel_rpc=256))
+    for i in range(5):
+        neighbors_i = [node.endpoint for node in random.sample(peers, min(3, len(peers)))]
+        peers.append(await hivemind.DHTNode.create(initial_peers=neighbors_i, parallel_rpc=32))
 
     await asyncio.gather(
         random.choice(peers).store('k1', 123, hivemind.get_dht_time() + 999),
@@ -393,14 +410,17 @@ async def test_dhtnode_reuse_get():
     assert await futures1['k2'] == await futures2['k2'] and (await futures1['k2'])[0] == 567
     assert await futures2['k3'] == await futures3['k3'] and (await futures3['k3']) is None
 
+    for p in peers:
+        p.__del__()#TODO
+
 
 @pytest.mark.forked
 @pytest.mark.asyncio
 async def test_dhtnode_blacklist():
     node1 = await hivemind.DHTNode.create(blacklist_time=999)
-    node2 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[f"{LOCALHOST}:{node1.port}"])
-    node3 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[f"{LOCALHOST}:{node1.port}"])
-    node4 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[f"{LOCALHOST}:{node1.port}"])
+    node2 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[node1.endpoint])
+    node3 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[node1.endpoint])
+    node4 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[node1.endpoint])
 
     assert await node2.store('abc', 123, expiration_time=hivemind.get_dht_time() + 99)
     assert len(node2.blacklist.ban_counter) == 0
@@ -413,25 +433,26 @@ async def test_dhtnode_blacklist():
     assert len(node2.blacklist.ban_counter) == 2
 
     for banned_peer in node2.blacklist.ban_counter:
-        assert any(banned_peer.endswith(str(port)) for port in [node3.port, node4.port])
+        assert any(banned_peer == endpoint for endpoint in [node3.endpoint, node4.endpoint])
 
-    node3_endpoint = await node3.protocol.get_outgoing_request_endpoint(f"{hivemind.LOCALHOST}:{node1.port}")
-    node3_endpoint = replace_port(node3_endpoint, node3.port)
+    node3_endpoint = await node3.protocol.get_outgoing_request_endpoint(node1.endpoint)
     assert await node1.get('abc', latest=True)  # force node1 to crawl dht and discover unresponsive peers
     assert node3_endpoint in node1.blacklist
 
-    node2_endpoint = await node2.protocol.get_outgoing_request_endpoint(f"{hivemind.LOCALHOST}:{node1.port}")
-    node2_endpoint = replace_port(node2_endpoint, node2.port)
+    node2_endpoint = await node2.protocol.get_outgoing_request_endpoint(node1.endpoint)
     assert await node1.get('abc', latest=True)  # force node1 to crawl dht and discover unresponsive peers
     assert node2_endpoint not in node1.blacklist
 
+    for node in [node1, node2, node3, node4]:
+        node.__del__()#TODO
+
 
 @pytest.mark.forked
 @pytest.mark.asyncio
 async def test_dhtnode_validate(fake_endpoint='127.0.0.721:*'):
     node1 = await hivemind.DHTNode.create(blacklist_time=999)
     with pytest.raises(ValidationError):
-        node2 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[f"{LOCALHOST}:{node1.port}"],
+        node2 = await hivemind.DHTNode.create(blacklist_time=999, initial_peers=[node1.endpoint],
                                               endpoint=fake_endpoint)
 
 
@@ -440,7 +461,7 @@ async def test_dhtnode_validate(fake_endpoint='127.0.0.721:*'):
 async def test_dhtnode_edge_cases():
     peers = []
     for i in range(5):
-        neighbors_i = [f'{LOCALHOST}:{node.port}' for node in random.sample(peers, min(3, len(peers)))]
+        neighbors_i = [node.endpoint for node in random.sample(peers, min(3, len(peers)))]
         peers.append(await hivemind.DHTNode.create(initial_peers=neighbors_i, parallel_rpc=4))
 
     subkeys = [0, '', False, True, 'abyrvalg', 4555]