import importlib

import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont


def log_txt_as_img(wh, xc, size=10):
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        font = ImageFont.truetype('fonts/DejaVuSans.ttf', size=size)
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Can't encode string for logging. Skipping.")

        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
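
# Usage sketch (illustrative, not part of the original module): render a batch of
# captions as image tensors so they can be logged alongside samples. Values are in
# [-1, 1] with shape (B, 3, H, W); assumes 'fonts/DejaVuSans.ttf' exists relative to
# the working directory.
#
#   captions = ["a photograph of an astronaut riding a horse",
#               "a watercolor painting of a fox"]
#   caption_imgs = log_txt_as_img((256, 256), captions, size=10)
#   assert caption_imgs.shape == (2, 3, 256, 256)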


def ismap(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] > 3)


def isimage(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d
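
# Usage sketch (illustrative, not part of the original module): `default` returns `val`
# when it is not None, otherwise the fallback `d`, calling it first if it is a function
# so expensive defaults are only evaluated when needed.
#
#   heads = default(None, 8)              # -> 8
#   dim = default(None, lambda: 4 * 8)    # -> 32 (lambda only evaluated on fallback)
#   dim = default(64, lambda: 4 * 8)      # -> 64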


def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))
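
# Usage sketch (illustrative, not part of the original module): collapse a per-element
# loss map to one value per batch item, e.g. (B, C, H, W) -> (B,).
#
#   per_pixel = (pred - target) ** 2      # hypothetical tensors of shape (B, C, H, W)
#   per_sample = mean_flat(per_pixel)     # shape (B,)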


def count_params(model, verbose=False):
    total_params = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
    return total_params
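
# Usage sketch (illustrative, not part of the original module):
#
#   n = count_params(torch.nn.Linear(512, 512), verbose=True)
#   # prints "Linear has 0.26 M params." and returns 262656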


def instantiate_from_config(config):
    if "target" not in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
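
# Usage sketch (illustrative, not part of the original module): `config` is expected to
# be a dict-like node (e.g. from an OmegaConf YAML config) with a dotted import path
# under "target" and constructor kwargs under "params".
#
#   cfg = {"target": "torch.nn.Conv2d",
#          "params": {"in_channels": 3, "out_channels": 64, "kernel_size": 3}}
#   conv = instantiate_from_config(cfg)   # equivalent to torch.nn.Conv2d(3, 64, kernel_size=3)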


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
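
# Usage sketch (illustrative, not part of the original module): resolve a dotted path to
# the object it names; `reload=True` re-imports the module first, which is useful during
# interactive development.
#
#   opt_cls = get_obj_from_str("torch.optim.AdamW")   # -> the torch.optim.AdamW class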


class AdamWwithEMAandWings(optim.Optimizer):
    # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298
    def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8,  # TODO: check hyperparameters before using
                 weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999,  # ema decay to match previous code
                 ema_power=1., param_names=()):
        """AdamW that saves EMA versions of the parameters."""
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= ema_decay <= 1.0:
            raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
                        ema_power=ema_power, param_names=param_names)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            ema_params_with_grad = []
            state_sums = []
            max_exp_avg_sqs = []
            state_steps = []
            amsgrad = group['amsgrad']
            beta1, beta2 = group['betas']
            ema_decay = group['ema_decay']
            ema_power = group['ema_power']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of parameter values
                    state['param_exp_avg'] = p.detach().float().clone()

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                ema_params_with_grad.append(state['param_exp_avg'])

                if amsgrad:
                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])

                # update the steps for each param group update
                state['step'] += 1
                # record the step after step update
                state_steps.append(state['step'])

            optim._functional.adamw(params_with_grad,
                                    grads,
                                    exp_avgs,
                                    exp_avg_sqs,
                                    max_exp_avg_sqs,
                                    state_steps,
                                    amsgrad=amsgrad,
                                    beta1=beta1,
                                    beta2=beta2,
                                    lr=group['lr'],
                                    weight_decay=group['weight_decay'],
                                    eps=group['eps'],
                                    maximize=False)

            cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power)
            for param, ema_param in zip(params_with_grad, ema_params_with_grad):
                ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay)

        return loss
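

# Usage sketch (illustrative, not part of the original module): the EMA copy of each
# parameter lives in the optimizer state under 'param_exp_avg' and is updated after every
# step, so it can be copied into a model for evaluation. Note that `optim._functional.adamw`
# is a private PyTorch API whose signature has changed across releases, so this class may
# need adjusting for newer torch versions.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    opt = AdamWwithEMAandWings(model.parameters(), lr=1.e-3, ema_decay=0.999)
    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    ema_weights = [opt.state[p]['param_exp_avg'] for p in model.parameters()]
    print([tuple(w.shape) for w in ema_weights])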