# test_remote_sequential.py

import pytest
import torch
import torch.nn.functional as F

from hivemind import DHT, BatchTensorDescriptor, get_logger
from hivemind.proto import runtime_pb2

from petals import AutoDistributedConfig
from petals.client import RemoteSequenceManager, RemoteSequential
from petals.data_structures import UID_DELIMITER
from petals.server.from_pretrained import load_pretrained_block

from test_utils import *

logger = get_logger(__name__)
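
# MODEL_NAME and INITIAL_PEERS used below are defined in test_utils (star-imported above)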


@pytest.mark.forked
def test_remote_sequential():
    config = AutoDistributedConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    dht = DHT(initial_peers=config.initial_peers, client_mode=True, start=True)
    test_inputs = torch.randn(1, 5, config.hidden_size, requires_grad=True)
    grad_proj = torch.randn(1, 5, config.hidden_size)

    sequential = RemoteSequential(config, dht=dht)
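
    # Reference pass: run forward and backward through the full remote sequence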
    full_outputs = sequential(test_inputs)
    (full_outputs * grad_proj).sum().backward()
    assert test_inputs.grad is not None
    full_grad = test_inputs.grad.clone()
    test_inputs.grad.data.zero_()
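
    # Slicing a RemoteSequential yields shorter sequences over the same blocks;
    # the two halves together must cover every layer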
    first_half = sequential[: config.num_hidden_layers // 2]
    second_half = sequential[config.num_hidden_layers // 2 :]
    assert len(first_half) + len(second_half) == len(sequential)
    assert abs(len(first_half) - len(second_half)) == config.num_hidden_layers % 2
    for m in sequential, first_half, second_half:
        assert isinstance(repr(m), str)
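
    # Running the halves back-to-back must reproduce the full model's outputs and input gradients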
    hidden = first_half(test_inputs)
    assert isinstance(hidden, torch.Tensor)
    assert hidden.shape == test_inputs.shape
    assert hidden.requires_grad
    second_half_outputs = second_half(hidden)
    assert torch.allclose(second_half_outputs, full_outputs, atol=1e-3)

    (second_half_outputs * grad_proj).sum().backward()
    assert torch.allclose(test_inputs.grad, full_grad, atol=3e-2)

    # test RemoteSequential with lossy compression
    block_uids = [f"{config.dht_prefix}{UID_DELIMITER}{i}" for i in range(config.num_hidden_layers)]
    lossy_sequential = RemoteSequential(
        config, sequence_manager=DummyCustomSequenceManager(config, block_uids, dht=dht)
    )

    test_inputs.grad = None
    approx_outputs = lossy_sequential(test_inputs)
    (approx_outputs * grad_proj).sum().backward()
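
    # The compressed results must differ from the exact ones (otherwise compression was
    # silently skipped) while still staying close enough to be usable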
    assert not torch.allclose(approx_outputs, full_outputs, rtol=0, atol=1e-4), "compression was not used"
    assert not torch.allclose(test_inputs.grad, full_grad, rtol=0, atol=1e-3), "compression was not used"
    assert abs(approx_outputs - full_outputs).mean() < 0.01
    absmax = abs(full_grad).max()
    assert abs(test_inputs.grad / absmax - full_grad / absmax).mean() < 0.05


class DummyCustomSequenceManager(RemoteSequenceManager):
    """A sequence manager that compresses inputs/outputs during the forward and backward passes."""
    @property
    def rpc_info(self):
        rpc_info = super().rpc_info
        dims = (2048, 1024)
        compressed_input_schema = BatchTensorDescriptor(dims, compression=runtime_pb2.CompressionType.FLOAT16)
        rpc_info["forward_schema"] = (compressed_input_schema,), dict()  # (args, kwargs)
        return rpc_info
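
    # Request FLOAT16 compression for the tensors that servers send back on both passes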
    def get_request_metadata(self, protocol: str, *args, **kwargs):
        metadata = super().get_request_metadata(protocol, *args, **kwargs)
        if protocol == "rpc_forward":
            metadata["output_compression"] = (runtime_pb2.CompressionType.FLOAT16,)
        elif protocol == "rpc_backward":
            metadata["output_compression"] = (runtime_pb2.CompressionType.FLOAT16,)
            # FIXME: Initially, we used CompressionType.BLOCKWISE_8BIT for rpc_backward() here.
            # This is currently broken since hivemind==1.1.8 is not compatible with bitsandbytes==0.39.1.
            # Please revert to BLOCKWISE_8BIT once this is fixed: https://github.com/learning-at-home/hivemind/issues/572
        return metadata


@pytest.mark.forked
def test_remote_sequential_prompts(batch_size=2, seq_len=5, pre_seq_len=3):
    config = AutoDistributedConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    remote_sequential = RemoteSequential(config)

    inputs = F.normalize(torch.randn(batch_size, seq_len, config.hidden_size), dim=-1)
    output_proj = F.normalize(torch.randn(batch_size, seq_len + pre_seq_len, config.hidden_size), dim=-1)
    input_prompts = F.normalize(torch.randn(batch_size, pre_seq_len, config.hidden_size, requires_grad=True), dim=-1)
    intermediate_prompts = torch.randn(
        config.num_hidden_layers, batch_size, pre_seq_len, config.hidden_size, requires_grad=True
    )
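
    # Re-detach the prompts so they are leaf tensors whose .grad is populated by backward()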
    input_prompts = input_prompts.detach().requires_grad_(True)
    intermediate_prompts = intermediate_prompts.detach().requires_grad_(True)

    inputs_with_prompts = torch.cat([inputs, input_prompts], dim=1)
    assert inputs_with_prompts.shape == (batch_size, seq_len + pre_seq_len, config.hidden_size)

    outputs = remote_sequential(inputs_with_prompts, prompts=intermediate_prompts)

    (outputs * output_proj).sum().backward()
    assert intermediate_prompts.grad is not None
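
    # Reference: replay the same computation locally, adding each block's prompt to the
    # first pre_seq_len positions before running that block loaded from the checkpoint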
    input_prompts_ref = input_prompts.clone().detach().requires_grad_(True)
    intermediate_prompts_ref = intermediate_prompts.clone().detach().requires_grad_(True)
    assert input_prompts_ref.grad is None
    assert intermediate_prompts_ref.grad is None

    outputs_ref = torch.cat([inputs, input_prompts_ref], dim=1)
    for block_index in range(config.num_hidden_layers):
        block_prompt = intermediate_prompts_ref[block_index]
        outputs_ref[:, : block_prompt.shape[1]] += block_prompt

        block = load_pretrained_block(MODEL_NAME, block_index=block_index, torch_dtype=torch.float32)
        (outputs_ref,) = block(outputs_ref)
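
    # The remote outputs and prompt gradients must match the local reference within tolerance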
    assert torch.allclose(outputs_ref, outputs, atol=1e-3)
    (outputs_ref * output_proj).sum().backward()
    assert input_prompts_ref.grad is not None
    assert torch.allclose(input_prompts_ref.grad, input_prompts.grad, atol=3e-2)
    assert intermediate_prompts_ref.grad is not None
    assert torch.allclose(intermediate_prompts_ref.grad, intermediate_prompts.grad, atol=1e-2)