test_block_exact_match.py

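"""
Integration test: check that a RemoteTransformerBlock served by a Petals swarm
produces the same outputs as the corresponding BLOOM block loaded locally.

Assumes a running test swarm: INITIAL_PEERS and MODEL_NAME are provided by
test_utils (imported below via `from test_utils import *`).
"""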
import random

import hivemind
import pytest
import torch
from test_utils import *

from petals.bloom.from_pretrained import load_pretrained_block
from petals.client import DistributedBloomConfig
from petals.client.remote_sequential import RemoteTransformerBlock
from petals.data_structures import UID_DELIMITER
from petals.dht_utils import get_remote_module

@pytest.mark.forked
def test_remote_block_exact_match(atol_forward=1e-5, atol_inference=1e-3):
    # Join the swarm as a lightweight client (this process serves no blocks itself)
    dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=True, start=True)
    config = DistributedBloomConfig.from_pretrained(MODEL_NAME)

    # Compare three randomly chosen transformer blocks against local references
    for block_index in random.sample(range(config.n_layer), 3):
        remote_block = get_remote_module(dht, f"{MODEL_NAME}{UID_DELIMITER}{block_index}", config)
        assert isinstance(remote_block, RemoteTransformerBlock)

        inputs = torch.randn(1, 8, config.hidden_size)

        # Forward pass over the full 8-token sequence in one call
        outputs_forward = remote_block(inputs)

        # Incremental inference: feed the same sequence one token at a time
        outputs_inference = []
        with remote_block.inference_session(max_length=inputs.shape[1]) as sess:
            for i in range(inputs.shape[1]):
                outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))

            # Test that max length is respected: the session is already full,
            # so one more step must raise
            with pytest.raises(ValueError, match=r"Maximum length exceeded") as exc_info:
                sess.step(inputs[:, -1:, :])
            assert "Maximum length exceeded" in repr(exc_info.value)
        outputs_inference = torch.cat(outputs_inference, dim=1)

        # Load the same block locally in float32 as the reference
        ref_block = load_pretrained_block(MODEL_NAME, block_index, torch_dtype=torch.float32)
        (outputs_local,) = ref_block(inputs)

        # Remote forward and incremental inference must both match the local block
        assert torch.allclose(outputs_local, outputs_forward, rtol=0, atol=atol_forward)
        assert torch.allclose(outputs_local, outputs_inference, rtol=0, atol=atol_inference)
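

# Usage sketch (an assumption, not part of the original file): run with the
# test swarm from test_utils up. The pytest-forked plugin must be installed,
# since the test is marked @pytest.mark.forked.
#   pytest test_block_exact_match.py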