@@ -14,27 +14,27 @@ from hivemind.moe.server.expert_uid import (
     is_valid_uid,
     split_uid,
 )
-from hivemind.utils import Endpoint, get_dht_time, get_port
+from hivemind.p2p import PeerID, PeerInfo
+from hivemind.utils import get_dht_time


 class DHTHandlerThread(threading.Thread):
-    def __init__(self, experts, dht: DHT, endpoint: Endpoint, update_period: int = 5, **kwargs):
+    def __init__(self, experts, dht: DHT, peer_id: PeerID, update_period: int = 5, **kwargs):
         super().__init__(**kwargs)
-        assert get_port(endpoint) is not None
-        self.endpoint = endpoint
+        self.peer_id = peer_id
         self.experts = experts
         self.dht = dht
         self.update_period = update_period
         self.stop = threading.Event()

     def run(self) -> None:
-        declare_experts(self.dht, self.experts.keys(), self.endpoint)
+        declare_experts(self.dht, self.experts.keys(), self.peer_id)
         while not self.stop.wait(self.update_period):
-            declare_experts(self.dht, self.experts.keys(), self.endpoint)
+            declare_experts(self.dht, self.experts.keys(), self.peer_id)


 def declare_experts(
-    dht: DHT, uids: Sequence[ExpertUID], endpoint: Endpoint, expiration: DHTExpiration = 300, wait: bool = True
+    dht: DHT, uids: Sequence[ExpertUID], peer_id: PeerID, expiration: DHTExpiration = 300, wait: bool = True
 ) -> Dict[ExpertUID, bool]:
     """
     Make experts visible to all DHT peers; update timestamps if declared previously.
@@ -49,22 +49,22 @@ def declare_experts(
     for uid in uids:
         assert is_valid_uid(uid), f"{uid} is not a valid expert uid. All uids must follow {UID_PATTERN.pattern}"
     return dht.run_coroutine(
-        partial(_declare_experts, uids=list(uids), endpoint=endpoint, expiration=expiration), return_future=not wait
+        partial(_declare_experts, uids=list(uids), peer_id=peer_id, expiration=expiration), return_future=not wait
     )


 async def _declare_experts(
-    dht: DHT, node: DHTNode, uids: List[ExpertUID], endpoint: Endpoint, expiration: DHTExpiration
+    dht: DHT, node: DHTNode, uids: List[ExpertUID], peer_id: PeerID, expiration: DHTExpiration
 ) -> Dict[ExpertUID, bool]:
     num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
     expiration_time = get_dht_time() + expiration
     data_to_store: Dict[Tuple[ExpertPrefix, Optional[Coordinate]], DHTValue] = {}
     for uid in uids:
-        data_to_store[uid, None] = endpoint
+        data_to_store[uid, None] = peer_id.to_base58()
         prefix = uid if uid.count(UID_DELIMITER) > 1 else f"{uid}{UID_DELIMITER}{FLAT_EXPERT}"
         for i in range(prefix.count(UID_DELIMITER) - 1):
             prefix, last_coord = split_uid(prefix)
-            data_to_store[prefix, last_coord] = [uid, endpoint]
+            data_to_store[prefix, last_coord] = [uid, peer_id.to_base58()]

     keys, maybe_subkeys, values = zip(*((key, subkey, value) for (key, subkey), value in data_to_store.items()))
     store_ok = await node.store_many(keys, values, expiration_time, subkeys=maybe_subkeys, num_workers=num_workers)
@@ -94,6 +94,6 @@ async def _get_experts(

     experts: List[Optional[RemoteExpert]] = [None] * len(uids)
     for i, uid in enumerate(uids):
-        if found[uid] is not None and isinstance(found[uid].value, Endpoint):
-            experts[i] = RemoteExpert(uid, found[uid].value)
+        if found[uid] is not None and isinstance(found[uid].value, str):
+            experts[i] = RemoteExpert(uid, PeerInfo(peer_id=PeerID.from_base58(found[uid].value), addrs=[]))
     return experts
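
Note (not part of the patch): _declare_experts above stores the server's base58-encoded PeerID both under the full expert uid and under each ancestor prefix produced by split_uid, so a peer can later be found either by exact uid or by prefix lookup. The standalone sketch below is only an illustration of that key layout, assuming "." as the uid delimiter; the example uid, peer id string, and helper names are hypothetical and the FLAT_EXPERT padding for non-hierarchical uids is omitted.

# Standalone sketch, not hivemind code: mirrors the (prefix, coordinate) -> value
# layout built by _declare_experts after this change.
from typing import Dict, List, Optional, Tuple, Union

UID_DELIMITER = "."  # assumed delimiter between grid coordinates in an expert uid


def split_uid(uid: str) -> Tuple[str, int]:
    # "ffn_expert.3.5" -> ("ffn_expert.3", 5), analogous to the split_uid helper used above
    prefix, _, last_coord = uid.rpartition(UID_DELIMITER)
    return prefix, int(last_coord)


def records_for(uid: str, peer_id_base58: str) -> Dict[Tuple[str, Optional[int]], Union[str, List[str]]]:
    # The full uid maps directly to the announcing peer; each ancestor prefix maps to [uid, peer]
    records: Dict[Tuple[str, Optional[int]], Union[str, List[str]]] = {(uid, None): peer_id_base58}
    prefix = uid
    for _ in range(prefix.count(UID_DELIMITER) - 1):
        prefix, last_coord = split_uid(prefix)
        records[(prefix, last_coord)] = [uid, peer_id_base58]
    return records


if __name__ == "__main__":
    for key, value in records_for("ffn_expert.3.5", "QmExamplePeerID").items():
        print(key, "->", value)

With this change, a server would construct DHTHandlerThread with its own libp2p peer id (e.g., the peer id of its DHT instance) instead of an ip:port endpoint string, and clients resolve that id back to reachable addresses through the DHT.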