{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d076ea64-ea9f-49d6-82d2-209da4556e78",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "# EchoMimicV2 DEMO"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0a1105f-87f4-4b4c-9edf-902b503cd2b3",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Copyright 2024 AntGroup\n",
    "#\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "#      http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7e020814-a61b-4850-896b-c09bb664ddab",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Input Refimg, Audio, and Pose"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b509db6-0757-4b41-b648-86debe2e8993",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Inputs for the demo: reference image, driving audio, and pose sequence.\n",
    "refimg_path = './assets/halfbody_demo/refimag/test.png'\n",
    "audio_path ='./assets/halfbody_demo/audio/chinese/echomimicv2_woman.wav'\n",
    "# When True, poses are extracted from a driving video in a later cell;\n",
    "# otherwise a directory of pre-extracted pose .npy files is used.\n",
    "using_video_driving = False\n",
    "if not using_video_driving:\n",
    "  pose_path = './assets/halfbody_demo/pose/good'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "213f0acb-ae0b-4d4e-8045-e18f101da5dc",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Align reference image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05581a41-300a-4afd-9ef6-f19acb29a270",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# reference image aligned\n",
    "import sys\n",
    "from src.utils.img_utils import pil_to_cv2, cv2_to_pil, center_crop_cv2, pils_from_video, save_videos_from_pils, save_video_from_cv2_list\n",
    "from PIL import Image\n",
    "import cv2\n",
    "from IPython import embed\n",
    "import numpy as np\n",
    "import copy\n",
    "from src.utils.motion_utils import motion_sync\n",
    "import pathlib\n",
    "import torch\n",
    "import pickle\n",
    "from glob import glob\n",
    "import os\n",
    "from src.models.dwpose.dwpose_detector import dwpose_detector as dwprocessor\n",
    "from src.models.dwpose.util import draw_pose\n",
    "import decord\n",
    "from tqdm import tqdm\n",
    "from moviepy.editor import AudioFileClip, VideoFileClip\n",
    "from multiprocessing.pool import ThreadPool\n",
    "\n",
    "##################################\n",
    "# Number of videos to process in the (optional) driving-video branch below.\n",
    "process_num = 100 #1266\n",
    "\n",
    "start = 0\n",
    "end = process_num + start\n",
    "#################################\n",
    "# Side length (pixels) of the square, resized-and-padded output frames.\n",
    "MAX_SIZE = 768\n",
    "\n",
    "def convert_fps(src_path, tgt_path, tgt_fps=24, tgt_sr=16000):\n",
    "    \"\"\"Re-encode the video at src_path to tgt_fps (audio resampled to tgt_sr Hz)\n",
    "    and write it to tgt_path ('.mov' extensions are rewritten to '.mp4').\"\"\"\n",
    "    clip = VideoFileClip(src_path)\n",
    "    new_clip = clip.set_fps(tgt_fps)\n",
    "    # NOTE(review): this guard tests tgt_fps, but the body resamples audio to\n",
    "    # tgt_sr -- presumably the check was meant to be `tgt_sr is not None`.\n",
    "    if tgt_fps is not None:\n",
    "        audio = new_clip.audio\n",
    "        audio = audio.set_fps(tgt_sr)\n",
    "        new_clip = new_clip.set_audio(audio)\n",
    "    if '.mov' in tgt_path:\n",
    "        tgt_path = tgt_path.replace('.mov', '.mp4')\n",
    "    new_clip.write_videofile(tgt_path, codec='libx264', audio_codec='aac')\n",
    "    \n",
    "def get_video_pose(\n",
    "        video_path: str, \n",
    "        sample_stride: int=1,\n",
    "        max_frame=None):\n",
    "    \"\"\"Run the DWPose detector on (subsampled) frames of a video.\n",
    "\n",
    "    sample_stride is scaled by the source fps so sampling is roughly 24 fps;\n",
    "    max_frame optionally truncates the frame sequence.\n",
    "    Returns (detected_poses, height, width, frames).\n",
    "    \"\"\"\n",
    "\n",
    "    # read input video\n",
    "    vr = decord.VideoReader(video_path, ctx=decord.cpu(0))\n",
    "    sample_stride *= max(1, int(vr.get_avg_fps() / 24))\n",
    "\n",
    "    frames = vr.get_batch(list(range(0, len(vr), sample_stride))).asnumpy()\n",
    "    # print(frames[0])\n",
    "    if max_frame is not None:\n",
    "        frames = frames[0:max_frame,:,:]\n",
    "    height, width, _ = frames[0].shape\n",
    "    detected_poses = [dwprocessor(frm) for frm in frames]\n",
    "    # Release detector resources once detection is done (see dwpose_detector).\n",
    "    dwprocessor.release_memory()\n",
    "\n",
    "    return detected_poses, height, width, frames\n",
    "\n",
    "def resize_and_pad(img, max_size):\n",
    "    \"\"\"Resize img to fit a max_size x max_size square, centre-padded with black.\n",
    "\n",
    "    The aspect ratio is preserved; the shorter side is centred in the square.\n",
    "    Returns the uint8 square canvas with the resized image placed inside.\n",
    "    \"\"\"\n",
    "    img_new = np.zeros((max_size, max_size, 3)).astype('uint8')\n",
    "    imh, imw = img.shape[0], img.shape[1]\n",
    "    half = max_size // 2\n",
    "    if imh > imw:\n",
    "        # Taller than wide: height fills the square, width is scaled + centred.\n",
    "        imh_new = max_size\n",
    "        imw_new = int(round(imw/imh * imh_new))\n",
    "        half_w = imw_new // 2\n",
    "        rb, re = 0, max_size\n",
    "        cb = half-half_w\n",
    "        ce = cb + imw_new\n",
    "    else:\n",
    "        # Wider (or square): width fills the square, height is scaled + centred.\n",
    "        imw_new = max_size\n",
    "        imh_new = int(round(imh/imw * imw_new))\n",
    "        half_h = imh_new // 2\n",
    "        cb, ce = 0, max_size\n",
    "        rb = half-half_h\n",
    "        re = rb + imh_new\n",
    "\n",
    "    img_resize = cv2.resize(img, (imw_new, imh_new))\n",
    "    img_new[rb:re,cb:ce,:] = img_resize\n",
    "    return img_new\n",
    "\n",
    "def resize_and_pad_param(imh, imw, max_size):\n",
    "    half = max_size // 2\n",
    "    if imh > imw:\n",
    "        imh_new = max_size\n",
    "        imw_new = int(round(imw/imh * imh_new))\n",
    "        half_w = imw_new // 2\n",
    "        rb, re = 0, max_size\n",
    "        cb = half-half_w\n",
    "        ce = cb + imw_new\n",
    "    else:\n",
    "        imw_new = max_size\n",
    "        imh_new = int(round(imh/imw * imw_new))\n",
    "        imh_new = max_size\n",
    "\n",
    "        half_h = imh_new // 2\n",
    "        cb, ce = 0, max_size\n",
    "        rb = half-half_h\n",
    "        re = rb + imh_new\n",
    "        \n",
    "    return imh_new, imw_new, rb, re, cb, ce\n",
    "\n",
    "def get_pose_params(detected_poses, max_size):\n",
    "    \"\"\"Derive a crop window and draw parameters from per-frame DWPose output.\n",
    "\n",
    "    NOTE(review): reads the module-level globals `height` and `width` (set by\n",
    "    the get_img_pose/get_video_pose call sites) for the pixel-space\n",
    "    conversion below -- confirm they are set before calling.\n",
    "\n",
    "    Returns a dict with 'draw_pose_params' (resize_and_pad_param output),\n",
    "    'pose_params' (normalised crop bounds) and 'video_params' (pixel-space\n",
    "    crop bounds). Also mutates detected_poses in place ('num', 'faces').\n",
    "    \"\"\"\n",
    "    print('get_pose_params...')\n",
    "    # pose rescale \n",
    "    w_min_all, w_max_all, h_min_all, h_max_all = [], [], [], []\n",
    "    mid_all = []\n",
    "    for num, detected_pose in enumerate(detected_poses):\n",
    "        detected_poses[num]['num'] = num\n",
    "        candidate_body = detected_pose['bodies']['candidate']\n",
    "        score_body = detected_pose['bodies']['score']\n",
    "        candidate_face = detected_pose['faces']\n",
    "        score_face = detected_pose['faces_score']\n",
    "        candidate_hand = detected_pose['hands']\n",
    "        score_hand = detected_pose['hands_score']\n",
    "\n",
    "        # face\n",
    "        # If several faces were detected, keep only the first one.\n",
    "        if candidate_face.shape[0] > 1:\n",
    "            index = 0\n",
    "            candidate_face = candidate_face[index]\n",
    "            score_face = score_face[index]\n",
    "            detected_poses[num]['faces'] = candidate_face.reshape(1, candidate_face.shape[0], candidate_face.shape[1])\n",
    "            detected_poses[num]['faces_score'] = score_face.reshape(1, score_face.shape[0])\n",
    "        else:\n",
    "            candidate_face = candidate_face[0]\n",
    "            score_face = score_face[0]\n",
    "\n",
    "        # body\n",
    "        # If several bodies were detected, keep the one with the highest mean\n",
    "        # keypoint score (18 body keypoints and 2 hands per body).\n",
    "        if score_body.shape[0] > 1:\n",
    "            tmp_score = []\n",
    "            for k in range(0, score_body.shape[0]):\n",
    "                tmp_score.append(score_body[k].mean())\n",
    "            index = np.argmax(tmp_score)\n",
    "            candidate_body = candidate_body[index*18:(index+1)*18,:]\n",
    "            score_body = score_body[index]\n",
    "            score_hand = score_hand[(index*2):(index*2+2),:]\n",
    "            candidate_hand = candidate_hand[(index*2):(index*2+2),:,:]\n",
    "        else:\n",
    "            score_body = score_body[0]\n",
    "        # NOTE(review): all_pose is filtered by score but never used afterwards.\n",
    "        all_pose = np.concatenate((candidate_body, candidate_face))\n",
    "        all_score = np.concatenate((score_body, score_face))\n",
    "        all_pose = all_pose[all_score>0.8]\n",
    "\n",
    "        body_pose = np.concatenate((candidate_body,))\n",
    "        # Horizontal centre: x of body keypoint 1 (presumably the neck in the\n",
    "        # 18-keypoint layout -- confirm against the DWPose keypoint order).\n",
    "        mid_ = body_pose[1, 0]\n",
    "\n",
    "        face_pose = candidate_face\n",
    "        hand_pose = candidate_hand\n",
    "\n",
    "        # Vertical extent: top of the face down to the lowest of the first\n",
    "        # seven body keypoints.\n",
    "        h_min, h_max = np.min(face_pose[:,1]), np.max(body_pose[:7,1])\n",
    "\n",
    "        h_ = h_max - h_min\n",
    "        \n",
    "        # Square-ish window: width equals the vertical extent, centred on mid_.\n",
    "        mid_w = mid_\n",
    "        w_min = mid_w - h_ // 2\n",
    "        w_max = mid_w + h_ // 2\n",
    "        \n",
    "        w_min_all.append(w_min)\n",
    "        w_max_all.append(w_max)\n",
    "        h_min_all.append(h_min)\n",
    "        h_max_all.append(h_max)\n",
    "        mid_all.append(mid_w)\n",
    "\n",
    "    # Aggregate over all frames: the union of the per-frame windows.\n",
    "    w_min = np.min(w_min_all)\n",
    "    w_max = np.max(w_max_all)\n",
    "    h_min = np.min(h_min_all)\n",
    "    h_max = np.max(h_max_all)\n",
    "    mid = np.mean(mid_all)\n",
    "\n",
    "    # Expand mostly upwards (0.65 of the margin above vs 0.05 below),\n",
    "    # clamped to the normalised [0, 1] image range.\n",
    "    margin_ratio = 0.25\n",
    "    h_margin = (h_max-h_min)*margin_ratio\n",
    "    \n",
    "    h_min = max(h_min-h_margin*0.65, 0)\n",
    "    h_max = min(h_max+h_margin*0.05, 1)\n",
    "\n",
    "    h_new = h_max - h_min\n",
    "    \n",
    "    # Convert normalised bounds to pixels using module-level height/width.\n",
    "    h_min_real = int(h_min*height)\n",
    "    h_max_real = int(h_max*height)\n",
    "    mid_real = int(mid*width)\n",
    "    \n",
    "    # Square pixel crop centred on mid_real; shrink the crop if it would\n",
    "    # cross the left image border.\n",
    "    height_new = h_max_real-h_min_real+1\n",
    "    width_new = height_new\n",
    "    w_min_real = mid_real - width_new // 2\n",
    "    if w_min_real < 0:\n",
    "      w_min_real = 0\n",
    "      width_new = mid_real * 2\n",
    "\n",
    "    w_max_real = w_min_real + width_new\n",
    "    w_min = w_min_real / width\n",
    "    w_max = w_max_real / width\n",
    "\n",
    "    imh_new, imw_new, rb, re, cb, ce = resize_and_pad_param(height_new, width_new, max_size)\n",
    "    res = {'draw_pose_params': [imh_new, imw_new, rb, re, cb, ce], \n",
    "           'pose_params': [w_min, w_max, h_min, h_max],\n",
    "           'video_params': [h_min_real, h_max_real, w_min_real, w_max_real],\n",
    "           }\n",
    "    return res\n",
    "\n",
    "def save_pose_params_item(input_items):\n",
    "    \"\"\"Normalise one frame's pose into the crop window and save it as .npy.\n",
    "\n",
    "    input_items is [detected_pose, pose_params, draw_pose_params, save_dir],\n",
    "    packed into a single argument so it works with ThreadPool.map.\n",
    "    NOTE: mutates detected_pose (and its arrays) in place.\n",
    "    \"\"\"\n",
    "    detected_pose, pose_params, draw_pose_params, save_dir = input_items\n",
    "    w_min, w_max, h_min, h_max = pose_params\n",
    "    num = detected_pose['num']\n",
    "    candidate_body = detected_pose['bodies']['candidate']\n",
    "    candidate_face = detected_pose['faces'][0]\n",
    "    candidate_hand = detected_pose['hands']\n",
    "    # Map all keypoint coordinates into the normalised crop window.\n",
    "    candidate_body[:,0] = (candidate_body[:,0]-w_min)/(w_max-w_min)\n",
    "    candidate_body[:,1] = (candidate_body[:,1]-h_min)/(h_max-h_min)\n",
    "    candidate_face[:,0] = (candidate_face[:,0]-w_min)/(w_max-w_min)\n",
    "    candidate_face[:,1] = (candidate_face[:,1]-h_min)/(h_max-h_min)\n",
    "    candidate_hand[:,:,0] = (candidate_hand[:,:,0]-w_min)/(w_max-w_min)\n",
    "    candidate_hand[:,:,1] = (candidate_hand[:,:,1]-h_min)/(h_max-h_min)\n",
    "    detected_pose['bodies']['candidate'] = candidate_body\n",
    "    detected_pose['faces'] = candidate_face.reshape(1, candidate_face.shape[0], candidate_face.shape[1])\n",
    "    detected_pose['hands'] = candidate_hand\n",
    "    detected_pose['draw_pose_params'] = draw_pose_params\n",
    "    # One .npy per frame, named by the frame index.\n",
    "    np.save(save_dir+'/'+str(num)+'.npy', detected_pose)\n",
    "\n",
    "def save_pose_params(detected_poses, pose_params, draw_pose_params, ori_video_path):\n",
    "    \"\"\"Normalise and persist every frame's pose as .npy via a thread pool.\n",
    "\n",
    "    The output directory is derived from ori_video_path ('video' -> 'pose/')\n",
    "    and returned to the caller.\n",
    "    \"\"\"\n",
    "    save_dir = ori_video_path.replace('video', 'pose/')\n",
    "    if not os.path.exists(save_dir):\n",
    "        os.makedirs(save_dir)\n",
    "\n",
    "    # One work item per frame; save_pose_params_item unpacks each 4-item list.\n",
    "    jobs = [[pose, pose_params, draw_pose_params, save_dir]\n",
    "            for pose in detected_poses]\n",
    "\n",
    "    pool = ThreadPool(8)\n",
    "    pool.map(save_pose_params_item, jobs)\n",
    "    pool.close()\n",
    "    pool.join()\n",
    "    return save_dir\n",
    "# NOTE(review): F is not referenced anywhere below -- candidate for removal.\n",
    "from torchvision.transforms import functional as F\n",
    "def get_img_pose(\n",
    "        img_path: str, \n",
    "        sample_stride: int=1,\n",
    "        max_frame=None):\n",
    "  \"\"\"Run the DWPose detector on a single reference image.\n",
    "\n",
    "  The image is upscaled so its shorter side is at least MAX_SIZE before\n",
    "  detection; it is never shrunk. sample_stride and max_frame are unused,\n",
    "  kept for signature parity with get_video_pose.\n",
    "\n",
    "  Returns (detected_poses, height, width, frame).\n",
    "  \"\"\"\n",
    "\n",
    "  # read input img\n",
    "  frame = cv2.imread(img_path)\n",
    "  height, width, _ = frame.shape\n",
    "  short_size = min(height, width)\n",
    "  # Ratio is clamped to >= 1.0: only upscale, never downscale.\n",
    "  resize_ratio = max(MAX_SIZE / short_size, 1.0)\n",
    "  frame = cv2.resize(frame, (int(resize_ratio * width), int(resize_ratio * height)))\n",
    "  height, width, _ = frame.shape\n",
    "  detected_poses = [dwprocessor(frame)]\n",
    "  dwprocessor.release_memory()\n",
    "\n",
    "  return detected_poses, height, width, frame\n",
    "\n",
    "def save_aligned_img(ori_frame, video_params, max_size):\n",
    "  \"\"\"Crop the detected half-body region and save it as a square aligned image.\n",
    "\n",
    "  video_params is [h_min, h_max, w_min, w_max] in pixel coordinates (from\n",
    "  get_pose_params()['video_params']). Returns the path of the saved image.\n",
    "  \"\"\"\n",
    "  h_min_real, h_max_real, w_min_real, w_max_real = video_params\n",
    "  img = ori_frame[h_min_real:h_max_real,w_min_real:w_max_real,:]\n",
    "  img_aligened = resize_and_pad(img, max_size=max_size)\n",
    "  print('aligned img shape:', img_aligened.shape)\n",
    "  save_dir = './assets/refimg_aligned'\n",
    "\n",
    "  os.makedirs(save_dir, exist_ok=True)\n",
    "  save_path = os.path.join(save_dir, 'aligned.png')\n",
    "  cv2.imwrite(save_path, img_aligened)\n",
    "  return save_path\n",
    "\n",
    "# Detect the pose on the reference image, derive crop/pad parameters, and\n",
    "# write the aligned reference image used by the pipeline below.\n",
    "detected_poses, height, width, ori_frame = get_img_pose(refimg_path, max_frame=None)\n",
    "res_params = get_pose_params(detected_poses, MAX_SIZE)\n",
    "refimg_aligned_path = save_aligned_img(ori_frame, res_params['video_params'], MAX_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e3a34f9",
   "metadata": {},
   "source": [
    "## Input driving video if used"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "43d58644-7618-4ebb-8358-eb5afa2d5dfd",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Directory and subfolder name of the driving videos (used only when\n",
    "# using_video_driving is True).\n",
    "video_dir = './assets/halfbody_demo/'\n",
    "video_name = 'video'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c13abf99-6af2-4d57-a348-99020e23b3f1",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Extract pose from driving video"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "987e61f8-ddd7-48b1-8738-6b404ac7a6dc",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "if using_video_driving:\n",
    "  # Convert each driving video to 24 fps, run pose detection, and save the\n",
    "  # per-frame pose parameters as .npy files (pose_path is updated to the\n",
    "  # last saved pose directory).\n",
    "  base_dir = video_dir\n",
    "  tasks = [video_name]\n",
    "  visualization = False\n",
    "  for sub_task in tasks:\n",
    "    ori_list = os.listdir(base_dir+sub_task)\n",
    "    new_dir = base_dir + sub_task+'_24fps'\n",
    "    if not os.path.exists(new_dir):\n",
    "        os.makedirs(new_dir)\n",
    "    index = 1\n",
    "    for i, mp4_file in enumerate(ori_list):\n",
    "      ori_video_path = base_dir + sub_task+'/'+mp4_file\n",
    "      if ori_video_path[-3:]=='mp4':\n",
    "        try:\n",
    "          # convert to 24fps\n",
    "          ori_video_path_new = ori_video_path.replace(sub_task, sub_task+'_24fps')\n",
    "          if '.MOV' in ori_video_path_new:\n",
    "              # BUGFIX: str.replace returns a new string; the result was\n",
    "              # previously discarded, leaving the '.MOV' suffix in place.\n",
    "              ori_video_path_new = ori_video_path_new.replace('.MOV', '.mp4')\n",
    "          convert_fps(ori_video_path, ori_video_path_new)\n",
    "          # extract pose\n",
    "          detected_poses, height, width, ori_frames = get_video_pose(ori_video_path_new, max_frame=None)\n",
    "          # parameterize pose\n",
    "          res_params = get_pose_params(detected_poses, MAX_SIZE)\n",
    "          # save pose to npy\n",
    "          pose_path = save_pose_params(detected_poses, res_params['pose_params'], res_params['draw_pose_params'], ori_video_path)\n",
    "          \n",
    "          index += 1\n",
    "            \n",
    "        except Exception as exc:\n",
    "          # Report the failure instead of silently swallowing everything;\n",
    "          # the previous bare `except:` also caught KeyboardInterrupt.\n",
    "          print(\"extract crash!\", ori_video_path, exc)\n",
    "          continue \n",
    "\n",
    "    print([\"All Finished\", sub_task, start, end])\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "65f8ba95-dc2f-4d85-8834-8259933ca8d5",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "948cb9bf-ea25-459c-b7a0-096093e3f038",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import random\n",
    "from datetime import datetime\n",
    "from pathlib import Path\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "from diffusers import AutoencoderKL, DDIMScheduler\n",
    "from einops import repeat\n",
    "from omegaconf import OmegaConf\n",
    "from PIL import Image\n",
    "import sys\n",
    "\n",
    "from src.models.unet_2d_condition import UNet2DConditionModel\n",
    "from src.models.unet_3d_emo import EMOUNet3DConditionModel\n",
    "from src.models.whisper.audio2feature import load_audio_model\n",
    "from src.pipelines.pipeline_echomimicv2 import EchoMimicV2Pipeline\n",
    "from src.utils.util import save_videos_grid\n",
    "from src.models.pose_encoder import PoseEncoder\n",
    "from src.utils.dwpose_util import draw_pose_select_v2\n",
    "\n",
    "from decord import VideoReader\n",
    "from moviepy.editor import VideoFileClip, AudioFileClip\n",
    "\n",
    "# Make a static ffmpeg build visible to the tooling by prepending it to PATH.\n",
    "# NOTE(review): FFMPEG_PATH is unconditionally overwritten here, so the\n",
    "# `is None` branch below is unreachable; os.environ.setdefault would respect\n",
    "# a user-provided value instead.\n",
    "os.environ['FFMPEG_PATH'] = './ffmpeg-4.4-amd64-static'\n",
    "ffmpeg_path = os.getenv('FFMPEG_PATH')\n",
    "\n",
    "if ffmpeg_path is None:\n",
    "    print(\"please download ffmpeg-static and export to FFMPEG_PATH. \\nFor example: export FFMPEG_PATH=./ffmpeg-4.4-amd64-static\")\n",
    "elif ffmpeg_path not in os.getenv('PATH'):\n",
    "    print(\"add ffmpeg to path\")\n",
    "    os.environ[\"PATH\"] = f\"{ffmpeg_path}:{os.environ['PATH']}\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d30c9eff-2976-4840-9d1d-c86eec06add4",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Initialize the models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe46b83e-4e14-446e-bdde-01f663629d30",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# CLI-style configuration; parse_known_args tolerates Jupyter's own argv.\n",
    "parser = argparse.ArgumentParser()\n",
    "parser.add_argument(\"--config\", type=str, default=\"./configs/prompts/infer.yaml\")\n",
    "# Output width/height and maximum number of generated frames.\n",
    "parser.add_argument(\"-W\", type=int, default=768)\n",
    "parser.add_argument(\"-H\", type=int, default=768)\n",
    "parser.add_argument(\"-L\", type=int, default=240)\n",
    "parser.add_argument(\"--seed\", type=int, default=3407)\n",
    "\n",
    "# Temporal context window passed to the pipeline for long sequences.\n",
    "parser.add_argument(\"--context_frames\", type=int, default=12)\n",
    "parser.add_argument(\"--context_overlap\", type=int, default=3)\n",
    "\n",
    "# Guidance scale and number of denoising steps.\n",
    "parser.add_argument(\"--cfg\", type=float, default=2.5)\n",
    "parser.add_argument(\"--steps\", type=int, default=30)\n",
    "parser.add_argument(\"--sample_rate\", type=int, default=16000)\n",
    "parser.add_argument(\"--fps\", type=int, default=24)\n",
    "parser.add_argument(\"--device\", type=str, default=\"cuda\")\n",
    "parser.add_argument(\"--ref_images_dir\", type=str, default=f'./assets/halfbody_demo/refimag')\n",
    "parser.add_argument(\"--pose_dir\", type=str, default=None)\n",
    "parser.add_argument(\"--refimg_name\", type=str, default='natural_bk_openhand/0035.png')\n",
    "parser.add_argument(\"--pose_name\", type=str, default=\"01\")\n",
    "parser.add_argument(\"--video_dir\", type=str, default=\"./assets/halfbody_demo/video\")\n",
    "\n",
    "args, _ = parser.parse_known_args()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dcfc9840-8390-4b78-8365-a53db438bcc9",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Load configuration and initialise all model components.\n",
    "config = OmegaConf.load(args.config)\n",
    "if config.weight_dtype == \"fp16\":\n",
    "    weight_dtype = torch.float16\n",
    "else:\n",
    "    weight_dtype = torch.float32\n",
    "\n",
    "# Fall back to CPU when CUDA was requested but is unavailable.\n",
    "device = args.device\n",
    "if device.__contains__(\"cuda\") and not torch.cuda.is_available():\n",
    "    device = \"cpu\"\n",
    "\n",
    "inference_config_path = config.inference_config\n",
    "infer_config = OmegaConf.load(inference_config_path)\n",
    "\n",
    "# Run tag derived from the motion module checkpoint path, e.g. '<dir>-iter<N>'.\n",
    "model_flag = '{}-iter{}'.format(config.motion_module_path.split('/')[-2], config.motion_module_path.split('/')[-1].split('-')[-1][:-4])\n",
    "save_dir = Path(f\"outputs/{model_flag}-seed{args.seed}/\")\n",
    "save_dir.mkdir(exist_ok=True, parents=True)\n",
    "print(save_dir)\n",
    "\n",
    "############# model_init started #############\n",
    "## vae init\n",
    "vae = AutoencoderKL.from_pretrained(\n",
    "    config.pretrained_vae_path,).to(device, dtype=weight_dtype)\n",
    "\n",
    "## reference net init\n",
    "reference_unet = UNet2DConditionModel.from_pretrained(\n",
    "    config.pretrained_base_model_path,\n",
    "    subfolder=\"unet\",).to(dtype=weight_dtype, device=device)\n",
    "reference_unet.load_state_dict(\n",
    "    torch.load(config.reference_unet_path, map_location=\"cpu\"),)\n",
    "\n",
    "## denoising net init\n",
    "if os.path.exists(config.motion_module_path):\n",
    "    print('using motion module')\n",
    "else:\n",
    "    # NOTE(review): exit() in a notebook raises SystemExit rather than\n",
    "    # terminating the kernel cleanly.\n",
    "    exit(\"motion module not found\")\n",
    "    ### stage1 + stage2\n",
    "denoising_unet = EMOUNet3DConditionModel.from_pretrained_2d(\n",
    "    config.pretrained_base_model_path,\n",
    "    config.motion_module_path,\n",
    "    subfolder=\"unet\",\n",
    "    unet_additional_kwargs=infer_config.unet_additional_kwargs,).to(dtype=weight_dtype, device=device)\n",
    "\n",
    "# strict=False tolerates missing/unexpected keys in the checkpoint.\n",
    "denoising_unet.load_state_dict(\n",
    "    torch.load(config.denoising_unet_path, map_location=\"cpu\"),\n",
    "    strict=False)\n",
    "\n",
    "# pose net init\n",
    "pose_net = PoseEncoder(320, conditioning_channels=3, block_out_channels=(16, 32, 96, 256)).to(\n",
    "    dtype=weight_dtype, device=device)\n",
    "pose_net.load_state_dict(torch.load(config.pose_encoder_path))\n",
    "\n",
    "### load audio processor params\n",
    "audio_processor = load_audio_model(model_path=config.audio_model_path, device=device)\n",
    "\n",
    "############# model_init finished #############\n",
    "width, height = 768, 768 # fixed size\n",
    "sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)\n",
    "scheduler = DDIMScheduler(**sched_kwargs)\n",
    "\n",
    "# Assemble the full EchoMimicV2 pipeline from the components above.\n",
    "pipe = EchoMimicV2Pipeline(\n",
    "    vae=vae,\n",
    "    reference_unet=reference_unet,\n",
    "    denoising_unet=denoising_unet,\n",
    "    audio_guider=audio_processor,\n",
    "    pose_encoder=pose_net,\n",
    "    scheduler=scheduler,)\n",
    "\n",
    "pipe = pipe.to(device, dtype=weight_dtype)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "acb3af19-2df5-4923-b4bf-4402773d864d",
   "metadata": {
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "source": [
    "## Animating half-body human video"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "13234468-fac0-49d0-b4cf-8fe9666e3a68",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Seed the RNG for reproducibility (random seed when args.seed is unset).\n",
    "if args.seed is not None and args.seed > -1:\n",
    "  generator = torch.manual_seed(args.seed)\n",
    "else:\n",
    "  generator = torch.manual_seed(random.randint(100, 1000000))\n",
    "\n",
    "final_fps = args.fps\n",
    "\n",
    "inputs_dict = {\n",
    "    \"refimg\": refimg_aligned_path,\n",
    "    \"audio\": audio_path,\n",
    "    \"pose\": pose_path,\n",
    "}\n",
    "\n",
    "start_idx = 0\n",
    "\n",
    "print('Pose:', inputs_dict['pose'])\n",
    "print('Reference:', inputs_dict['refimg'])\n",
    "print('Audio:', inputs_dict['audio'])\n",
    "audio_name = inputs_dict['audio'].split('/')[-1].split('.')[0]\n",
    "\n",
    "ref_flag = '.'.join([inputs_dict['refimg'].split('/')[-2], inputs_dict['refimg'].split('/')[-1]])\n",
    "save_path = Path(f\"outputs\")\n",
    "\n",
    "save_path.mkdir(exist_ok=True, parents=True)\n",
    "ref_s = inputs_dict['refimg'].split('/')[-1].split('.')[0]\n",
    "save_name = f\"{save_path}/{ref_s}-a-{audio_name}-i{start_idx}\"\n",
    "\n",
    "ref_image_pil = Image.open(inputs_dict['refimg']).resize((args.W, args.H))\n",
    "audio_clip = AudioFileClip(inputs_dict['audio'])\n",
    "\n",
    "# Clip length is bounded by both the audio duration and the number of\n",
    "# available pose frames.\n",
    "args.L = min(int(audio_clip.duration * final_fps), len(os.listdir(inputs_dict['pose'])))\n",
    "\n",
    "# Rasterise each saved pose frame onto a blank canvas at its stored\n",
    "# draw-pose placement, then normalise to a CHW float tensor in [0, 1].\n",
    "pose_list = []\n",
    "for index in range(start_idx, start_idx + args.L):\n",
    "    # NOTE(review): np.zeros((args.W, args.H, 3)) puts width first; rows are\n",
    "    # conventionally height -- harmless here only because W == H == 768.\n",
    "    tgt_musk = np.zeros((args.W, args.H, 3)).astype('uint8')\n",
    "    tgt_musk_path = os.path.join(inputs_dict['pose'], \"{}.npy\".format(index))\n",
    "    detected_pose = np.load(tgt_musk_path, allow_pickle=True).tolist()\n",
    "    imh_new, imw_new, rb, re, cb, ce = detected_pose['draw_pose_params']\n",
    "    im = draw_pose_select_v2(detected_pose, imh_new, imw_new, ref_w=800)\n",
    "    im = np.transpose(np.array(im),(1, 2, 0))\n",
    "    tgt_musk[rb:re,cb:ce,:] = im\n",
    "\n",
    "    tgt_musk_pil = Image.fromarray(np.array(tgt_musk)).convert('RGB')\n",
    "    pose_list.append(torch.Tensor(np.array(tgt_musk_pil)).to(dtype=weight_dtype, device=device).permute(2,0,1) / 255.0)\n",
    "\n",
    "# Stack (C, H, W) frames along dim=1 and add a batch axis: (1, C, L, H, W).\n",
    "poses_tensor = torch.stack(pose_list, dim=1).unsqueeze(0)\n",
    "audio_clip = AudioFileClip(inputs_dict['audio'])\n",
    "width, height = 768, 768\n",
    "# Trim the audio to exactly match the generated video length.\n",
    "audio_clip = audio_clip.set_duration(args.L / final_fps)\n",
    "video = pipe(\n",
    "    ref_image_pil,\n",
    "    inputs_dict['audio'],\n",
    "    poses_tensor[:,:,:args.L,...],\n",
    "    width,\n",
    "    height,\n",
    "    args.L,\n",
    "    args.steps,\n",
    "    args.cfg,\n",
    "    generator=generator,\n",
    "    audio_sample_rate=args.sample_rate,\n",
    "    context_frames=args.context_frames,\n",
    "    fps=final_fps,\n",
    "    context_overlap=args.context_overlap,\n",
    "    start_idx=start_idx,\n",
    ").videos \n",
    "\n",
    "final_length = min(video.shape[2], poses_tensor.shape[2], args.L)\n",
    "video_sig = video[:, :, :final_length, :, :]\n",
    "\n",
    "# Write the silent video first, then mux in the trimmed audio track.\n",
    "save_videos_grid(\n",
    "    video_sig,\n",
    "    save_name + \"_woa_sig.mp4\",\n",
    "    n_rows=1,\n",
    "    fps=final_fps,\n",
    ")\n",
    "\n",
    "video_clip_sig = VideoFileClip(save_name + \"_woa_sig.mp4\",)\n",
    "video_clip_sig = video_clip_sig.set_audio(audio_clip)\n",
    "video_clip_sig.write_videofile(save_name + \"_sig.mp4\", codec=\"libx264\", audio_codec=\"aac\", threads=2)\n",
    "# Remove the intermediate (without-audio) file.\n",
    "os.system(\"rm {}\".format(save_name + \"_woa_sig.mp4\"))\n",
    "print(save_name)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3b7dbb28",
   "metadata": {},
   "source": [
    "## Display video"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa341db6-81ff-4cf0-8e89-41e92170a0d9",
   "metadata": {
    "execution": {},
    "libroFormatter": "formatter-string",
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Render the generated (audio-muxed) video inline in the notebook.\n",
    "from IPython.display import display, Video\n",
    "display(Video(filename=save_name + \"_sig.mp4\"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "infer",
   "name": "infer"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
