# flow_base.py — Glow-style normalizing-flow building blocks.
  1. import scipy
  2. from scipy import linalg
  3. from torch.nn import functional as F
  4. import torch
  5. from torch import nn
  6. import numpy as np
  7. import modules.audio2motion.utils as utils
  8. from modules.audio2motion.transformer_models import FFTBlocks
  9. from utils.commons.hparams import hparams
  10. def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
  11. n_channels_int = n_channels[0]
  12. in_act = input_a + input_b
  13. t_act = torch.tanh(in_act[:, :n_channels_int, :])
  14. s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
  15. acts = t_act * s_act
  16. return acts
  17. class WN(torch.nn.Module):
  18. def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0,
  19. p_dropout=0, share_cond_layers=False):
  20. super(WN, self).__init__()
  21. assert (kernel_size % 2 == 1)
  22. assert (hidden_channels % 2 == 0)
  23. self.hidden_channels = hidden_channels
  24. self.kernel_size = kernel_size
  25. self.dilation_rate = dilation_rate
  26. self.n_layers = n_layers
  27. self.gin_channels = gin_channels
  28. self.p_dropout = p_dropout
  29. self.share_cond_layers = share_cond_layers
  30. self.in_layers = torch.nn.ModuleList()
  31. self.res_skip_layers = torch.nn.ModuleList()
  32. self.drop = nn.Dropout(p_dropout)
  33. self.use_adapters = hparams.get("use_adapters", False)
  34. if self.use_adapters:
  35. self.adapter_layers = torch.nn.ModuleList()
  36. if gin_channels != 0 and not share_cond_layers:
  37. cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
  38. self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
  39. for i in range(n_layers):
  40. dilation = dilation_rate ** i
  41. padding = int((kernel_size * dilation - dilation) / 2)
  42. in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
  43. dilation=dilation, padding=padding)
  44. in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
  45. self.in_layers.append(in_layer)
  46. # last one is not necessary
  47. if i < n_layers - 1:
  48. res_skip_channels = 2 * hidden_channels
  49. else:
  50. res_skip_channels = hidden_channels
  51. res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
  52. res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
  53. self.res_skip_layers.append(res_skip_layer)
  54. if self.use_adapters:
  55. adapter_layer = MlpAdapter(in_out_dim=res_skip_channels, hid_dim=res_skip_channels//4)
  56. self.adapter_layers.append(adapter_layer)
  57. def forward(self, x, x_mask=None, g=None, **kwargs):
  58. output = torch.zeros_like(x)
  59. n_channels_tensor = torch.IntTensor([self.hidden_channels])
  60. if g is not None and not self.share_cond_layers:
  61. g = self.cond_layer(g)
  62. for i in range(self.n_layers):
  63. x_in = self.in_layers[i](x)
  64. x_in = self.drop(x_in)
  65. if g is not None:
  66. cond_offset = i * 2 * self.hidden_channels
  67. g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
  68. else:
  69. g_l = torch.zeros_like(x_in)
  70. acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
  71. res_skip_acts = self.res_skip_layers[i](acts)
  72. if self.use_adapters:
  73. res_skip_acts = self.adapter_layers[i](res_skip_acts.transpose(1,2)).transpose(1,2)
  74. if i < self.n_layers - 1:
  75. x = (x + res_skip_acts[:, :self.hidden_channels, :]) * x_mask
  76. output = output + res_skip_acts[:, self.hidden_channels:, :]
  77. else:
  78. output = output + res_skip_acts
  79. return output * x_mask
  80. def remove_weight_norm(self):
  81. def remove_weight_norm(m):
  82. try:
  83. nn.utils.remove_weight_norm(m)
  84. except ValueError: # this module didn't have weight norm
  85. return
  86. self.apply(remove_weight_norm)
  87. def enable_adapters(self):
  88. if not self.use_adapters:
  89. return
  90. for adapter_layer in self.adapter_layers:
  91. adapter_layer.enable()
  92. def disable_adapters(self):
  93. if not self.use_adapters:
  94. return
  95. for adapter_layer in self.adapter_layers:
  96. adapter_layer.disable()
  97. class Permute(nn.Module):
  98. def __init__(self, *args):
  99. super(Permute, self).__init__()
  100. self.args = args
  101. def forward(self, x):
  102. return x.permute(self.args)
  103. class LayerNorm(nn.Module):
  104. def __init__(self, channels, eps=1e-4):
  105. super().__init__()
  106. self.channels = channels
  107. self.eps = eps
  108. self.gamma = nn.Parameter(torch.ones(channels))
  109. self.beta = nn.Parameter(torch.zeros(channels))
  110. def forward(self, x):
  111. n_dims = len(x.shape)
  112. mean = torch.mean(x, 1, keepdim=True)
  113. variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
  114. x = (x - mean) * torch.rsqrt(variance + self.eps)
  115. shape = [1, -1] + [1] * (n_dims - 2)
  116. x = x * self.gamma.view(*shape) + self.beta.view(*shape)
  117. return x
  118. class ConvReluNorm(nn.Module):
  119. def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
  120. super().__init__()
  121. self.in_channels = in_channels
  122. self.hidden_channels = hidden_channels
  123. self.out_channels = out_channels
  124. self.kernel_size = kernel_size
  125. self.n_layers = n_layers
  126. self.p_dropout = p_dropout
  127. assert n_layers > 1, "Number of layers should be larger than 0."
  128. self.conv_layers = nn.ModuleList()
  129. self.norm_layers = nn.ModuleList()
  130. self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
  131. self.norm_layers.append(LayerNorm(hidden_channels))
  132. self.relu_drop = nn.Sequential(
  133. nn.ReLU(),
  134. nn.Dropout(p_dropout))
  135. for _ in range(n_layers - 1):
  136. self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
  137. self.norm_layers.append(LayerNorm(hidden_channels))
  138. self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
  139. self.proj.weight.data.zero_()
  140. self.proj.bias.data.zero_()
  141. def forward(self, x, x_mask):
  142. x_org = x
  143. for i in range(self.n_layers):
  144. x = self.conv_layers[i](x * x_mask)
  145. x = self.norm_layers[i](x)
  146. x = self.relu_drop(x)
  147. x = x_org + self.proj(x)
  148. return x * x_mask
  149. class ActNorm(nn.Module):
  150. def __init__(self, channels, ddi=False, **kwargs):
  151. super().__init__()
  152. self.channels = channels
  153. self.initialized = not ddi
  154. self.logs = nn.Parameter(torch.zeros(1, channels, 1))
  155. self.bias = nn.Parameter(torch.zeros(1, channels, 1))
  156. def forward(self, x, x_mask=None, reverse=False, **kwargs):
  157. if x_mask is None:
  158. x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype)
  159. x_len = torch.sum(x_mask, [1, 2])
  160. if not self.initialized:
  161. self.initialize(x, x_mask)
  162. self.initialized = True
  163. if reverse:
  164. z = (x - self.bias) * torch.exp(-self.logs) * x_mask
  165. logdet = torch.sum(-self.logs) * x_len
  166. else:
  167. z = (self.bias + torch.exp(self.logs) * x) * x_mask
  168. logdet = torch.sum(self.logs) * x_len # [b]
  169. return z, logdet
  170. def store_inverse(self):
  171. pass
  172. def set_ddi(self, ddi):
  173. self.initialized = not ddi
  174. def initialize(self, x, x_mask):
  175. with torch.no_grad():
  176. denom = torch.sum(x_mask, [0, 2])
  177. m = torch.sum(x * x_mask, [0, 2]) / denom
  178. m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
  179. v = m_sq - (m ** 2)
  180. logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
  181. bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
  182. logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
  183. self.bias.data.copy_(bias_init)
  184. self.logs.data.copy_(logs_init)
class InvConvNear(nn.Module):
    """Invertible 1x1 convolution over groups of ``n_split`` channels (Glow).

    Channels are regrouped into blocks of ``n_split`` and mixed by a shared
    ``n_split x n_split`` matrix.  With ``lu=True`` the matrix is kept in a
    PLU decomposition, making the log-determinant and inverse cheap.
    """

    def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs):
        super().__init__()
        assert (n_split % 2 == 0)
        self.channels = channels
        self.n_split = n_split
        self.n_sqz = n_sqz
        self.no_jacobian = no_jacobian
        # Random orthogonal init; flip a column if needed so det = +1.
        # NOTE(review): torch.qr is deprecated in newer torch in favor of
        # torch.linalg.qr — confirm against the project's pinned version.
        w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0]
        if torch.det(w_init) < 0:
            w_init[:, 0] = -1 * w_init[:, 0]
        self.lu = lu
        if lu:
            # LU decomposition can slightly speed up the inverse
            np_p, np_l, np_u = linalg.lu(w_init)
            np_s = np.diag(np_u)
            np_sign_s = np.sign(np_s)
            np_log_s = np.log(np.abs(np_s))
            np_u = np.triu(np_u, k=1)
            l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1)
            eye = np.eye(*w_init.shape, dtype=float)
            # P and sign(S) stay fixed; L, log|S| and U are the learned parts.
            self.register_buffer('p', torch.Tensor(np_p.astype(float)))
            self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
            self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True)
            self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True)
            self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True)
            self.register_buffer('l_mask', torch.Tensor(l_mask))
            self.register_buffer('eye', torch.Tensor(eye))
        else:
            self.weight = nn.Parameter(w_init)

    def forward(self, x, x_mask=None, reverse=False, **kwargs):
        """x: [B, C, T] with C divisible by n_split; returns (z, logdet)."""
        b, c, t = x.size()
        assert (c % self.n_split == 0)
        if x_mask is None:
            x_mask = 1
            x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
        else:
            x_len = torch.sum(x_mask, [1, 2])
        # Regroup [B, C, T] -> [B, n_split, C // n_split, T] so the 1x1 conv
        # mixes within each group of n_split channels.
        x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t)
        x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t)
        if self.lu:
            # log|det W| is just the sum of log|S| in the PLU form
            self.weight, log_s = self._get_weight()
            logdet = log_s.sum()
            logdet = logdet * (c / self.n_split) * x_len
        else:
            logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len  # [b]
        if reverse:
            if hasattr(self, "weight_inv"):
                # inverse cached by store_inverse() for fast repeated inference
                weight = self.weight_inv
            else:
                weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
            logdet = -logdet
        else:
            weight = self.weight
            if self.no_jacobian:
                logdet = 0
        weight = weight.view(self.n_split, self.n_split, 1, 1)
        z = F.conv2d(x, weight)
        # Undo the regrouping back to [B, C, T].
        z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t)
        z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
        return z, logdet

    def _get_weight(self):
        """Recompose W = P @ L @ U from the PLU parameterization."""
        l, log_s, u = self.l, self.log_s, self.u
        l = l * self.l_mask + self.eye
        u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s))
        weight = torch.matmul(self.p, torch.matmul(l, u))
        return weight, log_s

    def store_inverse(self):
        # Precompute and cache the inverse for the reverse pass.
        weight, _ = self._get_weight()
        self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device)
  255. class InvConv(nn.Module):
  256. def __init__(self, channels, no_jacobian=False, lu=True, **kwargs):
  257. super().__init__()
  258. w_shape = [channels, channels]
  259. w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float)
  260. LU_decomposed = lu
  261. if not LU_decomposed:
  262. # Sample a random orthogonal matrix:
  263. self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
  264. else:
  265. np_p, np_l, np_u = linalg.lu(w_init)
  266. np_s = np.diag(np_u)
  267. np_sign_s = np.sign(np_s)
  268. np_log_s = np.log(np.abs(np_s))
  269. np_u = np.triu(np_u, k=1)
  270. l_mask = np.tril(np.ones(w_shape, dtype=float), -1)
  271. eye = np.eye(*w_shape, dtype=float)
  272. self.register_buffer('p', torch.Tensor(np_p.astype(float)))
  273. self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
  274. self.l = nn.Parameter(torch.Tensor(np_l.astype(float)))
  275. self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)))
  276. self.u = nn.Parameter(torch.Tensor(np_u.astype(float)))
  277. self.l_mask = torch.Tensor(l_mask)
  278. self.eye = torch.Tensor(eye)
  279. self.w_shape = w_shape
  280. self.LU = LU_decomposed
  281. self.weight = None
  282. def get_weight(self, device, reverse):
  283. w_shape = self.w_shape
  284. self.p = self.p.to(device)
  285. self.sign_s = self.sign_s.to(device)
  286. self.l_mask = self.l_mask.to(device)
  287. self.eye = self.eye.to(device)
  288. l = self.l * self.l_mask + self.eye
  289. u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
  290. dlogdet = self.log_s.sum()
  291. if not reverse:
  292. w = torch.matmul(self.p, torch.matmul(l, u))
  293. else:
  294. l = torch.inverse(l.double()).float()
  295. u = torch.inverse(u.double()).float()
  296. w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
  297. return w.view(w_shape[0], w_shape[1], 1), dlogdet
  298. def forward(self, x, x_mask=None, reverse=False, **kwargs):
  299. """
  300. log-det = log|abs(|W|)| * pixels
  301. """
  302. b, c, t = x.size()
  303. if x_mask is None:
  304. x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
  305. else:
  306. x_len = torch.sum(x_mask, [1, 2])
  307. logdet = 0
  308. if not reverse:
  309. weight, dlogdet = self.get_weight(x.device, reverse)
  310. z = F.conv1d(x, weight)
  311. if logdet is not None:
  312. logdet = logdet + dlogdet * x_len
  313. return z, logdet
  314. else:
  315. if self.weight is None:
  316. weight, dlogdet = self.get_weight(x.device, reverse)
  317. else:
  318. weight, dlogdet = self.weight, self.dlogdet
  319. z = F.conv1d(x, weight)
  320. if logdet is not None:
  321. logdet = logdet - dlogdet * x_len
  322. return z, logdet
  323. def store_inverse(self):
  324. self.weight, self.dlogdet = self.get_weight('cuda', reverse=True)
  325. class Flip(nn.Module):
  326. def forward(self, x, *args, reverse=False, **kwargs):
  327. x = torch.flip(x, [1])
  328. logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
  329. return x, logdet
  330. def store_inverse(self):
  331. pass
  332. class CouplingBlock(nn.Module):
  333. def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers,
  334. gin_channels=0, p_dropout=0, sigmoid_scale=False,
  335. share_cond_layers=False, wn=None):
  336. super().__init__()
  337. self.in_channels = in_channels
  338. self.hidden_channels = hidden_channels
  339. self.kernel_size = kernel_size
  340. self.dilation_rate = dilation_rate
  341. self.n_layers = n_layers
  342. self.gin_channels = gin_channels
  343. self.p_dropout = p_dropout
  344. self.sigmoid_scale = sigmoid_scale
  345. start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
  346. start = torch.nn.utils.weight_norm(start)
  347. self.start = start
  348. # Initializing last layer to 0 makes the affine coupling layers
  349. # do nothing at first. This helps with training stability
  350. end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
  351. end.weight.data.zero_()
  352. end.bias.data.zero_()
  353. self.end = end
  354. self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels,
  355. p_dropout, share_cond_layers)
  356. if wn is not None:
  357. self.wn.in_layers = wn.in_layers
  358. self.wn.res_skip_layers = wn.res_skip_layers
  359. def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
  360. if x_mask is None:
  361. x_mask = 1
  362. x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
  363. x = self.start(x_0) * x_mask
  364. x = self.wn(x, x_mask, g)
  365. out = self.end(x)
  366. z_0 = x_0
  367. m = out[:, :self.in_channels // 2, :]
  368. logs = out[:, self.in_channels // 2:, :]
  369. if self.sigmoid_scale:
  370. logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
  371. if reverse:
  372. z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
  373. logdet = torch.sum(-logs * x_mask, [1, 2])
  374. else:
  375. z_1 = (m + torch.exp(logs) * x_1) * x_mask
  376. logdet = torch.sum(logs * x_mask, [1, 2])
  377. z = torch.cat([z_0, z_1], 1)
  378. return z, logdet
  379. def store_inverse(self):
  380. self.wn.remove_weight_norm()
class GlowFFTBlocks(FFTBlocks):
    """FFTBlocks variant used inside Glow couplings: fuses a conditioning
    tensor into the input with a 1x1 conv before the transformer stack."""

    def __init__(self, hidden_size=128, gin_channels=256, num_layers=2, ffn_kernel_size=5,
                 dropout=None, num_heads=4, use_pos_embed=True, use_last_norm=True,
                 norm='ln', use_pos_embed_alpha=True):
        super().__init__(hidden_size, num_layers, ffn_kernel_size, dropout, num_heads, use_pos_embed,
                         use_last_norm, norm, use_pos_embed_alpha)
        # projects the concatenated [x ; g] back down to hidden_size
        self.inp_proj = nn.Conv1d(hidden_size + gin_channels, hidden_size, 1)

    def forward(self, x, x_mask=None, g=None):
        """
        :param x: [B, C_x, T]
        :param x_mask: [B, 1, T]
        :param g: [B, C_g, T]
        :return: [B, C_x, T]
        """
        if g is not None:
            x = self.inp_proj(torch.cat([x, g], 1))
        x = x.transpose(1, 2)
        # Parent FFTBlocks runs in [B, T, C] — presumably it takes a padding
        # mask that is True on padded positions, hence x_mask[:, 0] == 0.
        # NOTE(review): x_mask=None would crash here despite the default.
        x = super(GlowFFTBlocks, self).forward(x, x_mask[:, 0] == 0)
        x = x.transpose(1, 2)
        return x
  401. class TransformerCouplingBlock(nn.Module):
  402. def __init__(self, in_channels, hidden_channels, n_layers,
  403. gin_channels=0, p_dropout=0, sigmoid_scale=False):
  404. super().__init__()
  405. self.in_channels = in_channels
  406. self.hidden_channels = hidden_channels
  407. self.n_layers = n_layers
  408. self.gin_channels = gin_channels
  409. self.p_dropout = p_dropout
  410. self.sigmoid_scale = sigmoid_scale
  411. start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
  412. self.start = start
  413. # Initializing last layer to 0 makes the affine coupling layers
  414. # do nothing at first. This helps with training stability
  415. end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
  416. end.weight.data.zero_()
  417. end.bias.data.zero_()
  418. self.end = end
  419. self.fft_blocks = GlowFFTBlocks(
  420. hidden_size=hidden_channels,
  421. ffn_kernel_size=3,
  422. gin_channels=gin_channels,
  423. num_layers=n_layers)
  424. def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
  425. if x_mask is None:
  426. x_mask = 1
  427. x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
  428. x = self.start(x_0) * x_mask
  429. x = self.fft_blocks(x, x_mask, g)
  430. out = self.end(x)
  431. z_0 = x_0
  432. m = out[:, :self.in_channels // 2, :]
  433. logs = out[:, self.in_channels // 2:, :]
  434. if self.sigmoid_scale:
  435. logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
  436. if reverse:
  437. z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
  438. logdet = torch.sum(-logs * x_mask, [1, 2])
  439. else:
  440. z_1 = (m + torch.exp(logs) * x_1) * x_mask
  441. logdet = torch.sum(logs * x_mask, [1, 2])
  442. z = torch.cat([z_0, z_1], 1)
  443. return z, logdet
  444. def store_inverse(self):
  445. pass
class FreqFFTCouplingBlock(nn.Module):
    """Coupling block that treats the conditioning mel as a 2D (freq x time)
    map and mixes vertical (frequency), horizontal (time) and text streams.

    NOTE(review): several sizes are hard-coded (80 mel bins, the first 160
    channels of the unsqueezed g being mel, repeat factor 10 for the text
    stream) — confirm against the calling pipeline before reuse.
    """

    def __init__(self, in_channels, hidden_channels, n_layers,
                 gin_channels=0, p_dropout=0, sigmoid_scale=False):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.sigmoid_scale = sigmoid_scale
        hs = hidden_channels
        stride = 8
        # downsample (freq, time) by `stride` while lifting 3 planes to hs channels
        self.start = torch.nn.Conv2d(3, hs, kernel_size=stride * 2,
                                     stride=stride, padding=stride // 2)
        # zero-init the last layer so the coupling starts as the identity
        end = nn.ConvTranspose2d(hs, 2, kernel_size=stride, stride=stride)
        end.weight.data.zero_()
        end.bias.data.zero_()
        self.end = nn.Sequential(
            nn.Conv2d(hs * 3, hs, 3, 1, 1),
            nn.ReLU(),
            nn.GroupNorm(4, hs),
            nn.Conv2d(hs, hs, 3, 1, 1),
            end
        )
        # vertical stream: transformer across frequency bins within each frame
        self.fft_v = FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers)
        # horizontal stream: convs along time within each frequency bin
        self.fft_h = nn.Sequential(
            nn.Conv1d(hs, hs, 3, 1, 1),
            nn.ReLU(),
            nn.Conv1d(hs, hs, 3, 1, 1),
        )
        # text-conditioning stream (g minus its 160 mel channels)
        self.fft_g = nn.Sequential(
            nn.Conv1d(
                gin_channels - 160, hs, kernel_size=stride * 2, stride=stride, padding=stride // 2),
            Permute(0, 2, 1),
            FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers),
            Permute(0, 2, 1),
        )

    def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
        # split conditioning into mel and text parts, then re-squeeze each
        g_, _ = utils.unsqueeze(g)
        g_mel = g_[:, :80]
        g_txt = g_[:, 80:]
        g_mel, _ = utils.squeeze(g_mel)
        g_txt, _ = utils.squeeze(g_txt)  # [B, C, T]
        if x_mask is None:
            x_mask = 1
        x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
        # stack the pass-through half with the two squeezed mel planes
        x = torch.stack([x_0, g_mel[:, :80], g_mel[:, 80:]], 1)
        x = self.start(x)  # [B, C, N_bins, T]
        B, C, N_bins, T = x.shape
        # vertical: attend over frequency bins, one time frame at a time
        x_v = self.fft_v(x.permute(0, 3, 2, 1).reshape(B * T, N_bins, C))
        x_v = x_v.reshape(B, T, N_bins, -1).permute(0, 3, 2, 1)
        # horizontal: convolve over time, one frequency bin at a time
        x_h = self.fft_h(x.permute(0, 2, 1, 3).reshape(B * N_bins, C, T))
        x_h = x_h.reshape(B, N_bins, -1, T).permute(0, 2, 1, 3)
        # text stream broadcast across the downsampled frequency bins
        x_g = self.fft_g(g_txt)[:, :, None, :].repeat(1, 1, 10, 1)
        x = torch.cat([x_v, x_h, x_g], 1)
        out = self.end(x)
        z_0 = x_0
        # channel 0 of the 2-channel output is the shift, channel 1 the log-scale
        m = out[:, 0]
        logs = out[:, 1]
        if self.sigmoid_scale:
            logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
        if reverse:
            z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
            logdet = torch.sum(-logs * x_mask, [1, 2])
        else:
            z_1 = (m + torch.exp(logs) * x_1) * x_mask
            logdet = torch.sum(logs * x_mask, [1, 2])
        z = torch.cat([z_0, z_1], 1)
        return z, logdet

    def store_inverse(self):
        pass
  519. class ResidualCouplingLayer(nn.Module):
  520. def __init__(self,
  521. channels,
  522. hidden_channels,
  523. kernel_size,
  524. dilation_rate,
  525. n_layers,
  526. p_dropout=0,
  527. gin_channels=0,
  528. mean_only=False,
  529. nn_type='wn'):
  530. assert channels % 2 == 0, "channels should be divisible by 2"
  531. super().__init__()
  532. self.channels = channels
  533. self.hidden_channels = hidden_channels
  534. self.kernel_size = kernel_size
  535. self.dilation_rate = dilation_rate
  536. self.n_layers = n_layers
  537. self.half_channels = channels // 2
  538. self.mean_only = mean_only
  539. self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
  540. if nn_type == 'wn':
  541. self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout,
  542. gin_channels=gin_channels)
  543. # elif nn_type == 'conv':
  544. # self.enc = ConditionalConvBlocks(
  545. # hidden_channels, gin_channels, hidden_channels, [1] * n_layers, kernel_size,
  546. # layers_in_block=1, is_BTC=False)
  547. self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
  548. self.post.weight.data.zero_()
  549. self.post.bias.data.zero_()
  550. def forward(self, x, x_mask, g=None, reverse=False):
  551. x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
  552. h = self.pre(x0) * x_mask
  553. h = self.enc(h, x_mask=x_mask, g=g)
  554. stats = self.post(h) * x_mask
  555. if not self.mean_only:
  556. m, logs = torch.split(stats, [self.half_channels] * 2, 1)
  557. else:
  558. m = stats
  559. logs = torch.zeros_like(m)
  560. if not reverse:
  561. x1 = m + x1 * torch.exp(logs) * x_mask
  562. x = torch.cat([x0, x1], 1)
  563. logdet = torch.sum(logs, [1, 2])
  564. return x, logdet
  565. else:
  566. x1 = (x1 - m) * torch.exp(-logs) * x_mask
  567. x = torch.cat([x0, x1], 1)
  568. logdet = -torch.sum(logs, [1, 2])
  569. return x, logdet
  570. class ResidualCouplingBlock(nn.Module):
  571. def __init__(self,
  572. channels,
  573. hidden_channels,
  574. kernel_size,
  575. dilation_rate,
  576. n_layers,
  577. n_flows=4,
  578. gin_channels=0,
  579. nn_type='wn'):
  580. super().__init__()
  581. self.channels = channels
  582. self.hidden_channels = hidden_channels
  583. self.kernel_size = kernel_size
  584. self.dilation_rate = dilation_rate
  585. self.n_layers = n_layers
  586. self.n_flows = n_flows
  587. self.gin_channels = gin_channels
  588. self.flows = nn.ModuleList()
  589. for i in range(n_flows):
  590. self.flows.append(
  591. ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
  592. gin_channels=gin_channels, mean_only=True, nn_type=nn_type))
  593. self.flows.append(Flip())
  594. def forward(self, x, x_mask, g=None, reverse=False):
  595. if not reverse:
  596. for flow in self.flows:
  597. x, _ = flow(x, x_mask, g=g, reverse=reverse)
  598. else:
  599. for flow in reversed(self.flows):
  600. x, _ = flow(x, x_mask, g=g, reverse=reverse)
  601. return x
class Glow(nn.Module):
    """Glow normalizing flow over [B, T, C] sequences.

    Each of the ``n_blocks`` flow steps is ActNorm -> invertible 1x1 conv ->
    affine coupling, applied on a squeezed [B, C*n_sqz, T/n_sqz] view of the
    input.  ``forward`` maps data to latents; ``reverse=True`` inverts.
    """

    def __init__(self,
                 in_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_blocks,
                 n_layers,
                 p_dropout=0.,
                 n_split=4,
                 n_sqz=2,
                 sigmoid_scale=False,
                 gin_channels=0,
                 inv_conv_type='near',
                 share_cond_layers=False,
                 share_wn_layers=0,
                 ):
        super().__init__()
        """
        Note that regularization like weight decay can lead to NaN errors!
        """
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_blocks = n_blocks
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        self.n_split = n_split
        self.n_sqz = n_sqz
        self.sigmoid_scale = sigmoid_scale
        self.gin_channels = gin_channels
        self.share_cond_layers = share_cond_layers
        if gin_channels != 0 and share_cond_layers:
            # single conditioning projection shared by every coupling block
            cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
        wn = None
        self.flows = nn.ModuleList()
        for b in range(n_blocks):
            self.flows.append(ActNorm(channels=in_channels * n_sqz))
            if inv_conv_type == 'near':
                self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz))
            if inv_conv_type == 'invconv':
                self.flows.append(InvConv(channels=in_channels * n_sqz))
            if share_wn_layers > 0:
                # re-instantiate the shared WN stack only every share_wn_layers blocks
                if b % share_wn_layers == 0:
                    wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz,
                            p_dropout, share_cond_layers)
            self.flows.append(
                CouplingBlock(
                    in_channels * n_sqz,
                    hidden_channels,
                    kernel_size=kernel_size,
                    dilation_rate=dilation_rate,
                    n_layers=n_layers,
                    gin_channels=gin_channels * n_sqz,
                    p_dropout=p_dropout,
                    sigmoid_scale=sigmoid_scale,
                    share_cond_layers=share_cond_layers,
                    wn=wn
                ))

    def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False):
        """
        x: [B,T,C]
        x_mask: [B,T]
        g: [B,T,C]
        Returns (x, logdet_tot), plus the per-flow hidden states when
        ``return_hiddens`` is set.
        NOTE(review): despite the default, x_mask=None crashes at unsqueeze
        below — callers must pass a mask.
        """
        x = x.transpose(1, 2)
        x_mask = x_mask.unsqueeze(1)
        if g is not None:
            g = g.transpose(1, 2)
        logdet_tot = 0
        if not reverse:
            flows = self.flows
        else:
            # invert by running the flows in reverse order
            flows = reversed(self.flows)
        if return_hiddens:
            hs = []
        if self.n_sqz > 1:
            # fold time into channels: [B, C, T] -> [B, C*n_sqz, T//n_sqz]
            x, x_mask_ = utils.squeeze(x, x_mask, self.n_sqz)
            if g is not None:
                g, _ = utils.squeeze(g, x_mask, self.n_sqz)
            x_mask = x_mask_
        if self.share_cond_layers and g is not None:
            g = self.cond_layer(g)
        for f in flows:
            x, logdet = f(x, x_mask, g=g, reverse=reverse)
            if return_hiddens:
                hs.append(x)
            logdet_tot += logdet
        if self.n_sqz > 1:
            x, x_mask = utils.unsqueeze(x, x_mask, self.n_sqz)
        x = x.transpose(1, 2)
        if return_hiddens:
            return x, logdet_tot, hs
        return x, logdet_tot

    def store_inverse(self):
        """Prepare for fast inference: drop weight norm and cache inverses."""
        def remove_weight_norm(m):
            try:
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return
        self.apply(remove_weight_norm)
        for f in self.flows:
            f.store_inverse()
  707. if __name__ == '__main__':
  708. model = Glow(in_channels=64,
  709. hidden_channels=128,
  710. kernel_size=5,
  711. dilation_rate=1,
  712. n_blocks=12,
  713. n_layers=4,
  714. p_dropout=0.0,
  715. n_split=4,
  716. n_sqz=2,
  717. sigmoid_scale=False,
  718. gin_channels=80
  719. )
  720. exp = torch.rand([1,1440,64])
  721. mel = torch.rand([1,1440,80])
  722. x_mask = torch.ones([1,1440],dtype=torch.float32)
  723. y, logdet = model(exp, x_mask,g=mel, reverse=False)
  724. pred_exp, logdet = model(y, x_mask,g=mel, reverse=False)
  725. # y: [b, t,c=64]
  726. print(" ")