from_pretrained.py

  1. """
  2. Utils for fetching pretrained model parts. Currently, this relies on huggingface transformers' from_pretrained code.
  3. If necessary, one can rewrite this to implement a different behavior, such as:
  4. - loading files from a local data source (e.g. S3)
  5. - load files via BitTorrent ( https://pypi.org/project/libtorrent/ ) or IPFS( https://docs.ipfs.io/how-to )
  6. - fetch the weights over IPoAC, using a fleet of trained pigeons ( http://www.faqs.org/rfcs/rfc1149.html )
  7. """
from __future__ import annotations

from typing import Optional, OrderedDict, Union

import torch
from hivemind.utils.logging import get_logger, use_hivemind_log_handler
from transformers.modeling_utils import WEIGHTS_NAME
from transformers.utils.hub import cached_path, hf_bucket_url

from src.bloom import BloomBlock, BloomForCausalLM, DistributedBloomConfig

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)

CLIENT_BRANCH = "client"  # branch of a converted repo that stores the client-side weights
BLOCK_BRANCH_PREFIX = "block_"  # each transformer block lives on its own branch: "block_0", "block_1", ...
USER_AGENT = {"file_type": "model", "framework": "pytorch", "from_auto_class": False}
DTYPE_MAP = dict(bfloat16=torch.bfloat16, float16=torch.float16, float32=torch.float32, auto="auto")
cls = BloomForCausalLM  # NB: not referenced in this file
FORCE_DOWNLOAD = False
RESUME_DOWNLOAD = False
LOCAL_FILES_ONLY = False


def load_pretrained_block(
    converted_model_name_or_path: str,
    block_index: int,
    config: Optional[DistributedBloomConfig] = None,
    torch_dtype: Union[torch.dtype, str] = "auto",
    use_auth_token: Optional[str] = None,
) -> BloomBlock:
    """Load one BloomBlock from a converted model. See convert_model.py (or README.md) on how to convert it."""
    if config is None:
        config = DistributedBloomConfig.from_pretrained(converted_model_name_or_path, use_auth_token=use_auth_token)
    block = BloomBlock(config, layer_number=block_index)
    state_dict = _load_state_dict(converted_model_name_or_path, block_index, use_auth_token=use_auth_token)
    block.load_state_dict(state_dict)

    if torch_dtype == "auto":
        # "auto" means: cast each parameter to the dtype it has in the checkpoint (a checkpoint may mix dtypes)
        with torch.no_grad():
            for name, param in block.named_parameters():
                assert name in state_dict, f"{name} not in state dict"
                param.data = param.data.to(state_dict[name].dtype)
    else:
        assert torch_dtype in DTYPE_MAP.values(), f"torch_dtype must be one of {list(DTYPE_MAP.values())}"
        block = block.to(dtype=torch_dtype)

    # Load once more, strictly, after the dtype change: strict=True raises if any key is missing or unexpected
    report = block.load_state_dict(state_dict, strict=True)
    logger.info(f"Loaded {converted_model_name_or_path} block {block_index}, {report}")
    return block


def _load_state_dict(
    pretrained_model_name_or_path: str, block_index: Optional[int] = None, use_auth_token: Optional[str] = None
) -> OrderedDict[str, torch.Tensor]:
    # Pick the branch: one branch per block ("block_<index>"), or the "client" branch if no index is given
    revision = BLOCK_BRANCH_PREFIX + str(block_index) if block_index is not None else CLIENT_BRANCH
    archive_file = hf_bucket_url(pretrained_model_name_or_path, filename=WEIGHTS_NAME, revision=revision, mirror=None)

    # Download from the URL, or reuse the local cache if the file was fetched earlier
    resolved_archive_file = cached_path(
        archive_file,
        cache_dir=None,
        force_download=FORCE_DOWNLOAD,
        proxies=None,
        resume_download=RESUME_DOWNLOAD,
        local_files_only=LOCAL_FILES_ONLY,
        use_auth_token=use_auth_token,
        user_agent=USER_AGENT,
    )
    state_dict = torch.load(resolved_archive_file, map_location="cpu")
    return state_dict
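

# The docstring above suggests swapping out the loading backend. Below is a minimal sketch of one
# such alternative: reading a converted checkpoint from a local directory instead of the HF hub.
# It is illustrative only; the function name and the assumed on-disk layout (one
# "<BLOCK_BRANCH_PREFIX><index>/<WEIGHTS_NAME>" subdirectory per block) are hypothetical.
def _load_state_dict_from_local_dir(local_dir: str, block_index: Optional[int] = None) -> OrderedDict[str, torch.Tensor]:
    import os

    subdir = BLOCK_BRANCH_PREFIX + str(block_index) if block_index is not None else CLIENT_BRANCH
    weights_path = os.path.join(local_dir, subdir, WEIGHTS_NAME)  # e.g. ./checkpoints/block_3/pytorch_model.bin
    return torch.load(weights_path, map_location="cpu")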
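

# A minimal usage sketch. The repo name below is a hypothetical placeholder for a model converted
# with convert_model.py; each block branch holds roughly one transformer layer of weights.
if __name__ == "__main__":
    block = load_pretrained_block("your-org/bloom-converted", block_index=0, torch_dtype=torch.bfloat16)
    num_params = sum(p.numel() for p in block.parameters())
    logger.info(f"Block 0: {num_params} parameters, dtype={next(block.parameters()).dtype}")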