@@ -25,17 +25,18 @@ def test_store_get_experts(n_peers=10):
     expert_uids = [f"my_expert.{i}" for i in range(50)]
     batch_size = 10
     for batch_start in range(0, len(expert_uids), batch_size):
-        declare_experts(first_peer, expert_uids[batch_start : batch_start + batch_size], "localhost:1234")
+        declare_experts(first_peer, expert_uids[batch_start : batch_start + batch_size], first_peer.peer_id)
 
     found = get_experts(other_peer, random.sample(expert_uids, 5) + ["foo", "bar"])
     assert all(res is not None for res in found[:-2]), "Could not find some existing experts"
     assert all(res is None for res in found[-2:]), "Found non-existing experts"
 
-    other_expert, other_port = "my_other_expert.1337", random.randint(1000, 9999)
-    declare_experts(other_peer, [other_expert], f"that_host:{other_port}")
+    other_expert = "my_other_expert.1337"
+    declare_experts(other_peer, [other_expert], other_peer.peer_id)
     first_notfound, first_found = get_experts(first_peer, ["foobar", other_expert])
     assert isinstance(first_found, hivemind.RemoteExpert)
-    assert first_found.endpoint == f"that_host:{other_port}"
+    assert first_found.server_peer_info.peer_id == other_peer.peer_id
+    assert first_notfound is None
 
     # test graceful shutdown
     first_peer.shutdown()
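The hunk above replaces the old `host:port` endpoint strings with libp2p `PeerID`s. Below is a minimal sketch of the new declare/lookup flow, using only calls that appear in this diff; the second DHT peer and the `demo_expert.0` UID are made up for illustration, and `declare_experts` / `get_experts` are assumed to be imported as in this test module.

```python
# Sketch of the peer_id-based flow exercised by the updated test.
# Assumes declare_experts / get_experts are already imported as in this test module;
# the UID "demo_expert.0" is a hypothetical example.
import hivemind

server_dht = hivemind.DHT(start=True)
client_dht = hivemind.DHT(initial_peers=server_dht.get_visible_maddrs(), start=True)

# The third argument is now the declaring server's PeerID, not a "host:port" string.
declare_experts(server_dht, ["demo_expert.0"], server_dht.peer_id)

(expert,) = get_experts(client_dht, ["demo_expert.0"])
# RemoteExpert no longer carries .endpoint; the serving peer is identified by its PeerID.
assert expert.server_peer_info.peer_id == server_dht.peer_id

for dht in (server_dht, client_dht):
    dht.shutdown()
```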
@@ -43,8 +44,8 @@ def test_store_get_experts(n_peers=10):
     time.sleep(1.0)
     remaining_peer1 = random.choice([peer for peer in peers if peer.is_alive()])
     remaining_peer2 = random.choice([peer for peer in peers if peer.is_alive()])
-    assert all(declare_experts(remaining_peer1, ["new_expert.1"], "dummy"))
-    assert get_experts(remaining_peer2, ["new_expert.1"])[0].endpoint == "dummy"
+    assert all(declare_experts(remaining_peer1, ["new_expert.1"], remaining_peer1.peer_id))
+    assert get_experts(remaining_peer2, ["new_expert.1"])[0].server_peer_info.peer_id == remaining_peer1.peer_id
 
 
 @pytest.mark.forked
@@ -59,11 +60,11 @@ def test_beam_search(
         {"expert." + ".".join([str(random.randint(0, dim - 1)) for dim in grid_dims]) for _ in range(total_experts)}
     )
     for batch_start in range(0, len(real_experts), batch_size):
+        dht_ = random.choice(dht)
         declare_experts(
-            random.choice(dht),
+            dht_,
             real_experts[batch_start : batch_start + batch_size],
-            wait=True,
-            endpoint=f"host{batch_start // batch_size}:{random.randint(0, 65536)}",
+            peer_id=dht_.peer_id,
         )
 
     neighbors = sum([peer.get_visible_maddrs() for peer in random.sample(dht, min(3, len(dht)))], [])
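The `random.choice(dht)` call is hoisted into `dht_` so that the same DHT instance both performs the declaration and supplies the advertised `peer_id`. A self-contained sketch of that pattern follows; the peer count and expert UIDs are made-up examples, and `declare_experts` is assumed imported as in this test module.

```python
# Sketch of the "declare through the peer whose PeerID you advertise" pattern.
import random
import hivemind

dht = [hivemind.DHT(start=True)]
dht += [hivemind.DHT(initial_peers=dht[0].get_visible_maddrs(), start=True) for _ in range(2)]

dht_ = random.choice(dht)             # pick the declaring peer once ...
declare_experts(
    dht_,
    ["expert.0.1", "expert.2.3"],     # hypothetical grid-style UIDs
    peer_id=dht_.peer_id,             # ... so the advertised PeerID is that same peer's
)

for node in dht:
    node.shutdown()
```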
@@ -89,22 +90,26 @@ def test_dht_single_node():
     node = hivemind.DHT(start=True)
     beam_search = MoEBeamSearcher(node, "expert.", grid_size=(10,))
 
-    assert all(declare_experts(node, ["expert.1", "expert.2", "expert.3"], f"{hivemind.LOCALHOST}:1337").values())
-    assert len(declare_experts(node, ["ffn.1", "ffn.2"], endpoint="that_place")) == 4
-    assert len(declare_experts(node, ["e.1.2.3", "e.1.2.5", "e.2.0"], f"{hivemind.LOCALHOST}:42")) == 7
+    assert all(declare_experts(node, ["expert.1", "expert.2", "expert.3"], node.peer_id).values())
+    assert len(declare_experts(node, ["ffn.1", "ffn.2"], node.peer_id)) == 4
+    assert len(declare_experts(node, ["e.1.2.3", "e.1.2.5", "e.2.0"], node.peer_id)) == 7
 
     for expert in get_experts(node, ["expert.3", "expert.2"]):
-        assert expert.endpoint == f"{hivemind.LOCALHOST}:1337"
+        assert expert.server_peer_info.peer_id == node.peer_id
 
-    assert all(declare_experts(node, ["expert.5", "expert.2"], f"{hivemind.LOCALHOST}:1337").values())
+    assert all(declare_experts(node, ["expert.5", "expert.2"], node.peer_id).values())
     found_experts = beam_search.find_best_experts([(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)], beam_size=2)
     assert len(found_experts) == 2 and [expert.uid for expert in found_experts] == ["expert.5", "expert.3"]
 
     successors = beam_search.get_active_successors(["e.1.2.", "e.2.", "e.4.5."])
     assert len(successors["e.1.2."]) == 2
-    assert successors["e.1.2."][3] == UidEndpoint("e.1.2.3", f"{LOCALHOST}:42")
-    assert successors["e.1.2."][5] == UidEndpoint("e.1.2.5", f"{LOCALHOST}:42")
-    assert len(successors["e.2."]) == 1 and successors["e.2."][0] == UidEndpoint("e.2.0", f"{LOCALHOST}:42")
+
+    addrs = tuple(str(a.decapsulate("/p2p/" + a.get("p2p"))) for a in node.get_visible_maddrs())
+    endpoint = (node.peer_id.to_base58(), addrs)
+
+    assert successors["e.1.2."][3] == UidEndpoint("e.1.2.3", endpoint)
+    assert successors["e.1.2."][5] == UidEndpoint("e.1.2.5", endpoint)
+    assert len(successors["e.2."]) == 1 and successors["e.2."][0] == UidEndpoint("e.2.0", endpoint)
     assert successors["e.4.5."] == {}
 
     initial_beam = beam_search.get_initial_beam((3, 2, 1, 0, -1, -2, -3), beam_size=3)
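With the endpoint string gone, the hunk above builds the value the test expects `UidEndpoint` to carry: a tuple of the node's base58 peer id and its multiaddrs with the `/p2p/<peer id>` suffix stripped (the PeerID travels as its own field). An annotated restatement of that construction, reusing `node` from the hunk and no API beyond what the added lines already call:

```python
# Mirrors the added lines above: build the endpoint tuple the DHT is expected to store.
maddrs = node.get_visible_maddrs()  # e.g. /ip4/127.0.0.1/tcp/<port>/p2p/<peer id>
# Strip the trailing /p2p/<peer id> component, since the PeerID is stored separately.
addrs = tuple(str(a.decapsulate("/p2p/" + a.get("p2p"))) for a in maddrs)
endpoint = (node.peer_id.to_base58(), addrs)  # compared against UidEndpoint's second field
```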
@@ -194,7 +199,7 @@ async def test_negative_caching(n_peers=10):
     peers += [hivemind.DHT(initial_peers=initial_peers, start=True, **dht_kwargs) for _ in range(n_peers - 1)]
 
     writer_peer = random.choice(peers)
-    assert all(declare_experts(writer_peer, ["ffn.1.2.3", "ffn.3.4.5"], "myaddr:1234").values())
+    assert all(declare_experts(writer_peer, ["ffn.1.2.3", "ffn.3.4.5"], writer_peer.peer_id).values())
 
     neighbors = sum([peer.get_visible_maddrs() for peer in random.sample(peers, min(3, len(peers)))], [])
     neg_caching_peer = hivemind.DHT(initial_peers=neighbors, start=True, **dht_kwargs)