test_speculative_generation.py

import random

import pytest
import torch

from petals import AutoDistributedConfig, RemoteSequential
from petals.server.block_functions import MAX_SHORT_INFERENCE_TOKENS
from petals.server.from_pretrained import load_pretrained_block
from test_utils import *


@pytest.mark.forked
def test_remote_block_with_cache_invalidation_exact_match(atol_forward=1e-4, atol_inference=1e-3):
    config = AutoDistributedConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    remote_sequential = RemoteSequential(config)
    block_index = random.randint(0, config.num_hidden_layers - 1)
    remote_block = remote_sequential[block_index]

    # Two input batches that agree on the first 2 positions and diverge afterwards,
    # mimicking a speculation that was accepted for 2 tokens and rejected for the rest.
    inputs = torch.randn(1, MAX_SHORT_INFERENCE_TOKENS - 50, config.hidden_size)
    short_inputs = torch.randn(1, MAX_SHORT_INFERENCE_TOKENS - 50, config.hidden_size)
    short_inputs[:, :2, :] = inputs[:, :2, :]

    initial_outputs_inference = None
    secondary_outputs_inference = None
    with torch.inference_mode():
        with remote_block.inference_session(max_length=inputs.shape[1]) as sess:
            initial_outputs_inference = sess.step(inputs)
            # Roll the server-side attention cache back to position 2, discarding the
            # cached entries for the rejected positions, and recompute from there.
            secondary_outputs_inference = sess.step(short_inputs[:, 2:, :], start_from_position=2)
            result = torch.cat([initial_outputs_inference[:, :2, :], secondary_outputs_inference], dim=1)

    # The stitched remote outputs must exactly match a local float32 run of the same block.
    ref_block = load_pretrained_block(MODEL_NAME, block_index, torch_dtype=torch.float32)
    (outputs_local,) = ref_block(short_inputs)

    assert torch.allclose(outputs_local, result, rtol=0, atol=atol_inference)