# test_block_exact_match.py

import random

import pytest
import torch

from petals import AutoDistributedConfig, RemoteSequential
from petals.server.block_functions import MAX_SHORT_INFERENCE_TOKENS
from petals.server.from_pretrained import load_pretrained_block
from test_utils import *


@pytest.mark.forked
def test_remote_block_exact_match(atol_forward=1e-4, atol_inference=1e-3):
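    # Exact-match test: outputs of a randomly chosen remote block, computed both via a
    # single forward pass and via an incremental inference session, must agree with a
    # locally loaded reference block within the given absolute tolerances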
    config = AutoDistributedConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    remote_sequential = RemoteSequential(config)
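
    # Pick a random transformer block so different layers get covered across test runs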
    block_index = random.randint(0, config.num_hidden_layers - 1)
    remote_block = remote_sequential[block_index]
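
    # Use more tokens than MAX_SHORT_INFERENCE_TOKENS so that both the long and the
    # short inference code paths are exercised below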
    inputs = torch.randn(1, MAX_SHORT_INFERENCE_TOKENS + 8, config.hidden_size)
    outputs_forward = remote_block(inputs)

    outputs_inference = []
    with torch.inference_mode():
        with remote_block.inference_session(max_length=inputs.shape[1]) as sess:
            # Test long inference (unmerged inference pools)
            outputs_inference.append(sess.step(inputs[:, : MAX_SHORT_INFERENCE_TOKENS + 1, :]))

            # Test short inference (merged inference pools)
            for i in range(MAX_SHORT_INFERENCE_TOKENS + 1, inputs.shape[1]):
                outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))

            # test that max length is respected
            with pytest.raises(ValueError, match=r"Maximum length exceeded") as exc_info:
                sess.step(inputs[:, -1:, :])
            assert "Maximum length exceeded" in repr(exc_info.value)
    outputs_inference = torch.cat(outputs_inference, dim=1)
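
    # Re-run the same inputs through a locally loaded copy of the block as a reference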
    ref_block = load_pretrained_block(MODEL_NAME, block_index, torch_dtype=torch.float32)
    (outputs_local,) = ref_block(inputs)
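
    # Both the forward-pass and the step-by-step inference outputs must match the
    # local reference within the configured tolerances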
    assert torch.allclose(outputs_local, outputs_forward, rtol=0, atol=atol_forward)
    assert torch.allclose(outputs_local, outputs_inference, rtol=0, atol=atol_inference)