# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import torch
import numbers
import deepspeed
from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from unit.common import preferred_dtype
from unit.simple_model import *

from unittest.mock import MagicMock, patch


def compare_deepspeed_states(saved_model, loaded_model):
    # These are compared in more depth in other places
    assert hasattr(loaded_model, 'module')
    assert saved_model.sparse_tensor_module_names == loaded_model.sparse_tensor_module_names
    assert saved_model.skipped_steps == loaded_model.skipped_steps
    assert saved_model.global_steps == loaded_model.global_steps


def zero3_params_to_fetch(param_list):
    return [p for p in param_list if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE]
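
# Illustrative pattern (a sketch; this is the same gather idiom that
# compare_model_states() uses below): ZeRO-3 partitioned parameters must be
# gathered before their data is read, and with an empty list the context
# manager is a no-op.
#
#     to_fetch = zero3_params_to_fetch(list(model.module.parameters()))
#     with deepspeed.zero.GatheredParameters(to_fetch, enabled=len(to_fetch) > 0):
#         ...  # parameter data is materialized on this rank here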


def compare_model_states(saved_model, loaded_model, compare_optimizer=True, load_module_only=False):
    if not load_module_only:
        compare_deepspeed_states(saved_model, loaded_model)

    # Pass bare parameters here: zero3_params_to_fetch() checks `ds_id` on the
    # tensors themselves, so (name, param) tuples would never match.
    params_to_fetch = zero3_params_to_fetch(
        list(saved_model.module.parameters()) + list(loaded_model.module.parameters()))
    enable_gather = len(params_to_fetch) > 0
    with deepspeed.zero.GatheredParameters(params_to_fetch, enabled=enable_gather):
        for p0, p1 in zip(saved_model.module.named_parameters(), loaded_model.module.named_parameters()):
            np0, p0 = p0
            np1, p1 = p1
            if 'deepspeed_moe.gate.wg' in np0:
                # these params are converted to float at runtime, cast to half for comparison
                p1 = p1.half()
                p0 = p0.half()
            assert id(p0) != id(p1), f'Comparing fp16 model state tensor against itself: {id(p0)} <====> {id(p1)}'
            try:
                assert torch.allclose(p0, p1,
                                      atol=1e-07), f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}"
            except RuntimeError as err:
                print(f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}")
                raise err

    if not compare_optimizer:
        return

    if DeepSpeedZeroOptimizer_Stage3 is not None and isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer_Stage3):
        for p0, p1 in zip(saved_model.optimizer.fp32_partitioned_groups_flat,
                          loaded_model.optimizer.fp32_partitioned_groups_flat):
            assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"
    elif isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer):
        for p0, p1 in zip(saved_model.optimizer.single_partition_of_fp32_groups,
                          loaded_model.optimizer.single_partition_of_fp32_groups):
            assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
            assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"
    elif isinstance(saved_model.optimizer, FP16_Optimizer):
        for p0, p1 in zip(saved_model.optimizer.fp32_groups_flat, loaded_model.optimizer.fp32_groups_flat):
            assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
            assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"
    elif isinstance(saved_model.optimizer, FP16_UnfusedOptimizer):
        for params0, params1 in zip(saved_model.optimizer.fp32_groups, loaded_model.optimizer.fp32_groups):
            for p0, p1 in zip(params0, params1):
                assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
                assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"
    elif isinstance(saved_model.optimizer, torch.optim.Optimizer):
        pass
    else:
        assert False, f'Unexpected Optimizer Type: {saved_model.optimizer}'


def compare_state_dicts(state0, state1, expected_mismatch_keys=[]):
    # Expected-mismatch keys are excluded up front, so the loop below only
    # visits keys that must match.
    key_set0 = set(k for k in state0.keys() if k not in expected_mismatch_keys)
    key_set1 = set(k for k in state1.keys() if k not in expected_mismatch_keys)
    assert key_set0 == key_set1, f'failure due to key mismatch {key_set0} != {key_set1}'

    for k in key_set0:
        s0 = state0[k]
        s1 = state1[k]
        if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor):
            assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}'
            assert torch.equal(s0.to('cpu'), s1.to('cpu'))
        else:
            assert s0 == s1, f'failure with key = {k}, values = {s0} and {s1}'


def compare_opt_state_dicts(state0, state1, expected_mismatch_keys=[]):
    for param_group0, param_group1 in zip(state0['param_groups'], state1['param_groups']):
        compare_state_dicts(param_group0, param_group1, expected_mismatch_keys)

    assert "state" in state0
    assert "state" in state1
    # Compare the number of per-parameter states directly, rather than the
    # length of a single-element list wrapping keys().
    assert len(state0["state"]) == len(state1["state"])
    for (k0, s0), (k1, s1) in zip(state0["state"].items(), state1["state"].items()):
        assert k0 == k1, f'failure due to key mismatch {k0} != {k1}'
        compare_state_dicts(s0, s1, expected_mismatch_keys)


def compare_optimizer_states(saved_model, loaded_model, hidden_dim, fp16=True):
    # With fp16 the DeepSpeed engine wraps the base optimizer; unwrap it first.
    saved_optimizer = saved_model.optimizer.optimizer if fp16 else saved_model.optimizer
    loaded_optimizer = loaded_model.optimizer.optimizer if fp16 else loaded_model.optimizer

    for state0, state1 in zip(saved_optimizer.state.values(), loaded_optimizer.state.values()):
        compare_state_dicts(state0, state1)


def compare_lr_scheduler_states(saved_model, loaded_model):
    assert hasattr(saved_model, 'lr_scheduler')
    assert hasattr(loaded_model, 'lr_scheduler')

    saved_scheduler = saved_model.lr_scheduler
    loaded_scheduler = loaded_model.lr_scheduler

    assert hasattr(saved_scheduler, 'state_dict')
    assert hasattr(loaded_scheduler, 'state_dict')

    saved_sd = saved_scheduler.state_dict()
    loaded_sd = loaded_scheduler.state_dict()

    print(f"saved_sd = {saved_sd}")
    print(f"loaded_sd = {loaded_sd}")

    assert saved_sd.keys() == loaded_sd.keys()

    for state0, state1 in zip(saved_sd.values(), loaded_sd.values()):
        if isinstance(state0, numbers.Number) and isinstance(state1, numbers.Number):
            assert state0 == state1


# following mixture-of-experts.md
def create_moe_param_groups(model):
    from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer

    parameters = {'params': [p for p in model.parameters()], 'name': 'parameters'}
    return split_params_into_different_moe_groups_for_optimizer(parameters)
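
# Illustrative usage (a sketch; SimpleMoEModel is assumed to come from
# unit.simple_model via the star import above): the returned param groups can
# be passed straight to a torch optimizer, so MoE expert parameters land in
# their own group.
#
#     model = SimpleMoEModel(hidden_dim=16, num_experts=2)
#     optimizer = torch.optim.AdamW(create_moe_param_groups(model), lr=1e-3)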


def create_deepspeed_model(config_dict, model, base_optimizer):
    ds_model, _, _, _ = deepspeed.initialize(config=config_dict,
                                             model=model,
                                             model_parameters=create_moe_param_groups(model),
                                             optimizer=base_optimizer)
    ds_model.empty_partition_cache()
    return ds_model
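
# Illustrative usage (a minimal sketch; the config keys are standard DeepSpeed
# config fields but the values are placeholders, and base_optimizer=None lets
# the optimizer come from the config instead):
#
#     config_dict = {"train_batch_size": 2, "optimizer": {"type": "Adam", "params": {"lr": 1e-4}}}
#     ds_model = create_deepspeed_model(config_dict, SimpleModel(hidden_dim=10), base_optimizer=None)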


def checkpoint_correctness_verification(config_dict,
                                        models,
                                        hidden_dim,
                                        tmpdir,
                                        load_optimizer_states=False,
                                        load_lr_scheduler_states=False,
                                        train_batch=False,
                                        base_optimizers=[None, None],
                                        empty_tag=False,
                                        seq_dataloader=False,
                                        load_module_only=False,
                                        dtype=None):
    if dtype is None:
        dtype = preferred_dtype()
    ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0])

    if seq_dataloader:
        data_loader = sequence_dataloader(model=ds_model,
                                          total_samples=50,
                                          hidden_dim=hidden_dim,
                                          device=ds_model.device,
                                          dtype=dtype)
    else:
        data_loader = random_dataloader(model=ds_model,
                                        total_samples=50,
                                        hidden_dim=hidden_dim,
                                        device=ds_model.device,
                                        dtype=dtype)

    if train_batch:
        ds_model.set_dataloader(data_loader)
        for _, batch in enumerate(data_loader):
            loss = ds_model.train_batch()
    else:
        for _, batch in enumerate(data_loader):
            loss = ds_model(batch[0], batch[1])
            ds_model.backward(loss)
            ds_model.step()

    # Flush ZeRO stage 3 cache
    ds_model.empty_partition_cache()

    trained_model = ds_model
    save_folder = os.path.join(tmpdir, 'saved_checkpoint')
    save_tag = None if empty_tag else '1'

    trained_model.save_checkpoint(save_folder, tag=save_tag)
    dist.barrier()

    for root, _, files in os.walk(save_folder):
        for f in files:
            if "_expert_" in f and "_model_states" in f:
                expert = torch.load(os.path.join(root, f))
                needed, storages = 0, {}
                for name, tensor in expert.items():
                    needed += tensor.size().numel()
                    storage = tensor.storage()
                    # some storage can be shared within an expert's checkpoint
                    storages[storage.data_ptr()] = storage.size()
                stored = sum(v for _, v in storages.items())
                assert needed == stored, f"MoE expert checkpoint uses more storage than required: {f}"
    loaded_model = create_deepspeed_model(config_dict=config_dict, model=models[1], base_optimizer=base_optimizers[1])
    assert list(trained_model.parameters())[0].dtype == list(loaded_model.parameters())[0].dtype

    context = patch.object(loaded_model, "_get_optimizer_ckpt_name",
                           wraps=loaded_model._get_optimizer_ckpt_name) if not load_optimizer_states else MagicMock()
    with context as optim_load_state_dict_mock:
        loaded_model.load_checkpoint(save_folder,
                                     tag=save_tag,
                                     load_optimizer_states=load_optimizer_states,
                                     load_lr_scheduler_states=load_lr_scheduler_states,
                                     load_module_only=load_module_only)
        if not load_optimizer_states:
            # should not attempt to get the file name to load it
            optim_load_state_dict_mock.assert_not_called()

    compare_model_states(trained_model,
                         loaded_model,
                         compare_optimizer=load_optimizer_states,
                         load_module_only=load_module_only)

    if load_optimizer_states:
        compare_optimizer_states(trained_model, loaded_model, hidden_dim, dtype == torch.float16)

    if load_lr_scheduler_states:
        compare_lr_scheduler_states(trained_model, loaded_model)
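
# Illustrative end-to-end usage from a test (a sketch; `tmpdir` would be a
# pytest fixture and SimpleModel is assumed to come from unit.simple_model):
#
#     config_dict = {
#         "train_batch_size": 2,
#         "optimizer": {"type": "Adam", "params": {"lr": 1e-5}},
#         "fp16": {"enabled": True},
#     }
#     hidden_dim = 10
#     models = [SimpleModel(hidden_dim) for _ in range(2)]
#     checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir,
#                                         load_optimizer_states=True)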