test_chained_forward_backward.py

######
# Warning: this test is a work in progress. It will be modified soon.
# - if you want more stable tests, see test_block_exact_match
# - if you want to figure out chained inference, ask yozh
import os

import hivemind
import torch
from hivemind.moe.expert_uid import ExpertInfo

from src.bloom.from_pretrained import load_pretrained_block
from src.client.remote_block import RemoteTransformerBlock
from src.dht_utils import get_remote_module
INITIAL_PEERS = os.environ.get("INITIAL_PEERS")
if not INITIAL_PEERS:
    raise RuntimeError("Must specify INITIAL_PEERS environment variable with one or more peer ids")
INITIAL_PEERS = INITIAL_PEERS.split()

BLOCK_UID = os.environ.get("BLOCK_UID")
if not BLOCK_UID:
    raise RuntimeError("Must specify BLOCK_UID as an index of a transformer block to be tested")

REF_NAME = os.environ.get("REF_NAME", "bigscience/test-bloomd-6b3")
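
# How to run this test (a sketch; the multiaddr below is a placeholder — substitute
# a real peer from your swarm and a block UID that the server actually hosts):
#   export INITIAL_PEERS="/ip4/127.0.0.1/tcp/31337/p2p/<peer_id>"
#   export BLOCK_UID="bloom6b3.3"
#   pytest test_chained_forward_backward.py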

# seq_length > 128: rpc_forward_stream & rpc_backward_stream
# seq_length <= 128: rpc_forward & rpc_backward
def test_forward_backward_exact_match(atol_forward=1e-4, atol_backward=1e-4, seq_length=1):
    dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=True, start=True)
    (remote_block,) = get_remote_module(dht, BLOCK_UID)
    assert remote_block is not None, f"Could not find {BLOCK_UID} in DHT"
    assert isinstance(remote_block, RemoteTransformerBlock)

    _ = remote_block.info  # lazy-init info now, because otherwise we will _break_ info init by changing _info
    # overriding _info with a space-separated chain of UIDs makes the server run blocks 3, 4, and 5 in sequence
    remote_block._info = ExpertInfo("bloom6b3.3 bloom6b3.4 bloom6b3.5", remote_block._info.peer_id)

    ref_blocks = [
        load_pretrained_block(REF_NAME, 3, torch_dtype=torch.float32),
        load_pretrained_block(REF_NAME, 4, torch_dtype=torch.float32),
        load_pretrained_block(REF_NAME, 5, torch_dtype=torch.float32),
    ]

    inputs = torch.randn(1, seq_length, 4096, requires_grad=True)
    outputs_rpc = remote_block.forward(inputs)[0]
    outputs_rpc.sum().backward()
    grads_rpc = inputs.grad
    inputs.grad = None  # reset so the reference backward pass does not accumulate into the RPC grads

    # reference: run the same three blocks locally in float32
    hidden_states = inputs
    for ref_block in ref_blocks:
        hidden_states = ref_block.forward(hidden_states)[0]
    outputs_ref = hidden_states
    outputs_ref.sum().backward()
    grads_ref = inputs.grad

    assert torch.allclose(outputs_ref, outputs_rpc, rtol=0, atol=atol_forward)
    assert torch.allclose(grads_ref, grads_rpc, rtol=0, atol=atol_backward)
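
# Minimal standalone runner (a sketch, not part of the original test suite): running
# this file directly exercises both RPC code paths documented above the test. The
# seq_length values are an assumption based on the stated 128-token threshold.
if __name__ == "__main__":
    test_forward_backward_exact_match(seq_length=1)  # short input: rpc_forward & rpc_backward
    test_forward_backward_exact_match(seq_length=129)  # long input: rpc_forward_stream & rpc_backward_stream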