inference_one_block.py

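"""Run a single Bloom block locally on dummy data and report device memory usage.

Example invocation (sketch only; the config path below is illustrative, adjust to your setup):

    python inference_one_block.py --config bloom-config.json --num_steps 100 --device cuda
"""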
import argparse

import torch
from hivemind.utils.logging import get_logger, use_hivemind_log_handler
from tqdm.auto import trange

from src.bloom.block import BloomBlock
from src.bloom.model import DistributedBloomConfig
from src.bloom.ops import build_alibi_tensor
use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)
def print_device_info(device=None):
    """Prints device stats. Code from https://stackoverflow.com/a/53374933/12891528"""
    device = torch.device(device or ("cuda" if torch.cuda.is_available() else "cpu"))
    logger.info(f"Using device: {device}")

    # Additional info when using CUDA
    if device.type == "cuda":
        logger.info(torch.cuda.get_device_name(0))
        logger.info("Memory Usage:")
        logger.info(f"Allocated: {round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1)} GB")
        logger.info(f"Cached: {round(torch.cuda.memory_cached(0) / 1024 ** 3, 1)} GB")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a single Bloom block locally on dummy data")
    parser.add_argument("--config", required=True, type=str, help="Path to a config json file")
    parser.add_argument("--state_dict", default=None, type=str, help="Optional path to a saved block state dict")
    parser.add_argument("--layer_index", default=0, type=int, help="Index of the block inside the model")
    parser.add_argument("--num_steps", default=500, type=int, help="How many inference steps to run")
    parser.add_argument("--device", default=None, type=str, help="Run inference on this device")
    parser.add_argument("--block-path", default="", type=str, help="Optional path to a saved Bloom block state dict")
    args = parser.parse_args()

    if args.device is None:
        args.device = "cuda" if torch.cuda.is_available() else "cpu"
    logger.info(f"Using device {args.device}")

    config = DistributedBloomConfig.from_json_file(args.config)
    block = BloomBlock(config, args.layer_index)
    if args.block_path != "":
        logger.info(f"Loading block from {args.block_path}")
        block.load_state_dict(torch.load(args.block_path))
    block = block.to(args.device)
    block = block.to(torch.bfloat16)

    # Feed one dummy hidden state per step, reusing the attention cache so the
    # effective sequence length grows by one token each iteration.
    cache = None
    for i in trange(args.num_steps):
        dummy_input = torch.randn(1, 1, config.hidden_size, device=args.device).to(torch.bfloat16)
        alibi = build_alibi_tensor(i + 1, config.num_attention_heads).to(args.device)
        with torch.no_grad():
            outputs, cache = block.forward(dummy_input, alibi=alibi, use_cache=True, layer_past=cache)

    print_device_info(args.device)