@@ -1,6 +1,5 @@
 from typing import Dict, List, Tuple
 
-import transformers.utils.logging
 from multiaddr import Multiaddr
 from pydantic import BaseModel, StrictFloat, confloat, conint
 
@@ -9,7 +8,6 @@ from hivemind.dht.crypto import RSASignatureValidator
 from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator
 from hivemind.dht.validation import RecordValidatorBase
 from hivemind.utils.logging import get_logger
-from transformers.trainer_utils import is_main_process
 
 logger = get_logger(__name__)
 
@@ -58,15 +56,8 @@ def log_visible_maddrs(visible_maddrs: List[Multiaddr], only_p2p: bool) -> None:
     logger.info(f"Full list of visible multiaddresses: {' '.join(str(addr) for addr in visible_maddrs)}")
 
 
-def setup_logging(training_args):
-    # Log on each process the small summary:
-    logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
-        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+def log_process_rank(training_args):
+    logger.info(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
     )
-    # Set the verbosity to info of the Transformers logger (on main process only):
-    if is_main_process(training_args.local_rank):
-        transformers.utils.logging.set_verbosity_info()
-        transformers.utils.logging.enable_default_handler()
-        transformers.utils.logging.enable_explicit_format()
-    logger.info("Training/evaluation parameters %s", training_args)
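
For reference, a minimal sketch of how the renamed helper could be called after this change. The utils import path and the dummy arguments object below are assumptions for illustration, not part of this diff; any object exposing local_rank, device, n_gpu, and fp16 attributes will do:

    from dataclasses import dataclass

    from utils import log_process_rank  # module patched above; import path assumed


    @dataclass
    class DummyTrainingArguments:
        local_rank: int = -1   # -1 means no distributed training
        device: str = "cpu"
        n_gpu: int = 0
        fp16: bool = False


    # Logs a one-line INFO summary of the process configuration; the
    # transformers-specific verbosity setup is no longer performed here.
    log_process_rank(DummyTrainingArguments())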