lm_visualizer.py

import os
import shutil

import cv2
import numpy as np

from data_util.face3d_helper import Face3DHelper
from utils.visualization.ffmpeg_utils import imgs_to_video

# instantiate the BFM-based helper; keypoint_mode selects the landmark layout
face3d_helper = Face3DHelper('deep_3drecon/BFM', keypoint_mode='mediapipe')

# lrs3_stats = np.load('data/binary/lrs3/stats.npy', allow_pickle=True).tolist()
# lrs3_idexp_mean = lrs3_stats['idexp_lm3d_mean'].reshape([1, 204])
# lrs3_idexp_std = lrs3_stats['idexp_lm3d_std'].reshape([1, 204])
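# Note: the commented-out stats are shaped [1, 204] = [1, 68 * 3], i.e. they
# belong to a 68-point landmark layout; with keypoint_mode='mediapipe' the
# helper uses the 468-point MediaPipe mesh, so they would not apply directly.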

def render_idexp_npy_to_lm_video(npy_name, out_video_name, audio_name=None):
    try:
        # the .npy may directly store id-exp landmark offsets, shape [T, n_landmarks * 3]
        idexp_lm3d = np.load(npy_name)
    except Exception:
        # otherwise it stores a dict of 3DMM coefficients; rebuild the landmarks from them
        coeff = np.load(npy_name, allow_pickle=True).tolist()
        t = coeff['exp'].shape[0]
        # print(coeff['id'][0] - coeff['id'][1])
        if len(coeff['id']) == 1:
            coeff['id'] = np.repeat(coeff['id'], t, axis=0)
        idexp_lm3d = face3d_helper.reconstruct_idexp_lm3d_np(coeff['id'], coeff['exp']).reshape([t, -1])
    t = idexp_lm3d.shape[0]  # number of frames (defined for both load branches)
    # undo the idexp normalization: offsets are stored scaled by 10 around the mean key shape
    lm3d = idexp_lm3d / 10 + face3d_helper.key_mean_shape.squeeze().reshape([1, -1]).cpu().numpy()
    lm3d = lm3d.reshape([t, -1, 3])
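    # For reference, a coefficient .npy that the except-branch accepts could be
    # built like this sketch (key names follow the code; the 80-dim id / 64-dim
    # exp sizes are an assumption based on the common BFM convention):
    #   np.save('coeff.npy', {'id': np.random.randn(1, 80).astype(np.float32),
    #                         'exp': np.random.randn(250, 64).astype(np.float32)})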
    # lm3d[..., 0] = 0.5  # lm3d[:, :1, 0].repeat(lm3d.shape[1], axis=1)
    tmp_img_dir = os.path.join(os.path.dirname(out_video_name), "tmp_lm3d_imgs")
    os.makedirs(tmp_img_dir, exist_ok=True)
    WH = 512
    # map landmarks from roughly [-1, 1] into [0, WH] pixel coordinates
    lm3d = (lm3d * WH / 2 + WH / 2).astype(int)
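    # e.g. with WH = 512: x = -1.0 maps to pixel 0, x = 0.0 to pixel 256, and
    # x = 1.0 to pixel 512 (cv2 clips shapes that fall on or past the border)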
    # eye_idx = list(range(36, 48))
    # mouth_idx = list(range(48, 68))
    for i_img in range(len(lm3d)):
        lm2d = lm3d[i_img, :, :2]  # [n_landmarks, 2]
        img = np.ones([WH, WH, 3], dtype=np.uint8) * 255
        # draw each landmark as a filled blue dot (OpenCV uses BGR)
        for i in range(len(lm2d)):
            x, y = lm2d[i]
            color = (255, 0, 0)
            img = cv2.circle(img, center=(int(x), int(y)), radius=3, color=color, thickness=-1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        # flip vertically so the face is upright, then label each landmark index
        img = cv2.flip(img, 0)
        for i in range(len(lm2d)):
            x, y = lm2d[i]
            y = WH - y  # compensate for the vertical flip
            img = cv2.putText(img, f"{i}", org=(int(x), int(y)), fontFace=font, fontScale=0.3, color=(255, 0, 0))
        out_name = os.path.join(tmp_img_dir, f'{i_img:05d}.png')
        cv2.imwrite(out_name, img)
    imgs_to_video(tmp_img_dir, out_video_name, audio_name)
    shutil.rmtree(tmp_img_dir)  # remove the temporary frame directory
    print(f"landmark video saved at {out_video_name}")
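
# If utils.visualization.ffmpeg_utils is unavailable, a minimal stand-in for
# imgs_to_video might look like the sketch below (assumes ffmpeg on PATH and a
# 25 fps frame rate; the real helper's signature and behavior may differ):
#
#   import subprocess
#   def imgs_to_video(img_dir, out_video_name, audio_name=None):
#       cmd = ['ffmpeg', '-y', '-framerate', '25',
#              '-i', os.path.join(img_dir, '%05d.png')]
#       if audio_name is not None:
#           cmd += ['-i', audio_name, '-shortest']
#       cmd += ['-pix_fmt', 'yuv420p', out_video_name]
#       subprocess.run(cmd, check=True)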

if __name__ == '__main__':
    import argparse
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--npy_name', type=str, default="infer_out/May/pred_lm3d/zozo.npy", help='the path of the landmark .npy')
    argparser.add_argument('--audio_name', type=str, default="data/raw/val_wavs/zozo.wav", help='the path of the audio file')
    argparser.add_argument('--out_path', type=str, default="infer_out/May/visualized_lm3d/zozo.mp4", help='the path to save visualization results')
    args = argparser.parse_args()
    render_idexp_npy_to_lm_video(args.npy_name, args.out_path, audio_name=args.audio_name)
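
# Example invocation (paths are the script defaults shown above):
#   python lm_visualizer.py \
#       --npy_name infer_out/May/pred_lm3d/zozo.npy \
#       --audio_name data/raw/val_wavs/zozo.wav \
#       --out_path infer_out/May/visualized_lm3d/zozo.mp4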