#%%
from ezds.ezdlearn.config import load_config
from models.DINet import DINet
import time, dlib

import numpy as np
import glob
import os
import cv2, csv
import torch
import subprocess
import random
from collections import OrderedDict
from tqdm import tqdm

#%%
from ezds.ezdlearn.utils import visual as vt
from ezds.ezdlearn.utils.load import load_video_by_frame
import whisper
from torch.nn import functional as F

#%%

def whisper_encode(model, x):
    """Run a Whisper audio encoder over a log-mel tensor.

    Mirrors ``whisper.model.AudioEncoder.forward``: two GELU convs,
    positional embedding (truncated to the actual sequence length),
    the transformer blocks, then the final layer norm.

    Args:
        model: a Whisper encoder exposing ``conv1``, ``conv2``,
            ``positional_embedding``, ``blocks`` and ``ln_post``.
        x: log-mel spectrogram tensor, shape (batch, n_mels, time).

    Returns:
        Encoded features of shape (batch, time', d_model).
    """
    h = F.gelu(model.conv1(x))
    h = F.gelu(model.conv2(h))
    h = h.permute(0, 2, 1)
    # slice the positional table to the (possibly shorter) sequence length
    h = (h + model.positional_embedding[:h.shape[1], :]).to(h.dtype)
    for layer in model.blocks:
        h = layer(h)
    return model.ln_post(h)

def split_array(arr, split_size):
    """Split *arr* into consecutive chunks of at most ``split_size`` items.

    All chunks except possibly the last have exactly ``split_size``
    elements; a trailing empty chunk (exact multiple of the size) is
    dropped. An input shorter than ``split_size`` yields one chunk.
    """
    n_chunks = max(len(arr) // split_size, 1)
    cut_points = [split_size * k for k in range(1, n_chunks + 1)]
    pieces = np.array_split(arr, cut_points)
    # np.array_split leaves an empty tail when len(arr) is a multiple
    # of split_size — trim it
    return pieces[:-1] if len(pieces[-1]) == 0 else pieces

def compute_crop_radius(video_size, landmark_data_clip, random_scale=None):
    '''
    Judge whether the face region of a landmark clip can be cropped and,
    if so, compute the crop radius.

    Args:
        video_size: (width, height) of the video frames.
        landmark_data_clip: array of shape (T, 68, 2) with per-frame
            68-point landmarks as (x, y).
        random_scale: crop-scale factor; when None a random value in
            [1.05, 1.15) is drawn (training-style jitter).

    Returns:
        (True, radius) when the crop box fits inside every frame and the
        face size is stable across the clip; (False, None) otherwise.
        ``radius`` is an int, rounded up to a multiple of 4.
    '''
    video_w, video_h = video_size[0], video_size[1]
    landmark_max_clip = np.max(landmark_data_clip, axis=1)
    if random_scale is None:
        random_scale = random.random() / 10 + 1.05
    # vertical extent below the nose bridge (point 29) and mouth width
    # (corners 48/54) drive the radius
    radius_h = (landmark_max_clip[:, 1] - landmark_data_clip[:, 29, 1]) * random_scale
    radius_w = (landmark_data_clip[:, 54, 0] - landmark_data_clip[:, 48, 0]) * random_scale
    radius_clip = np.max(np.stack([radius_h, radius_w], 1), 1) // 2
    radius_max = np.max(radius_clip)
    # BUGFIX: np.int was removed in NumPy 1.24 — use the builtin int.
    # Round up to the next multiple of 4.
    radius_max = (int(radius_max / 4) + 1) * 4
    radius_max_1_4 = radius_max // 4
    # crop box: anchored at nose bridge (29) vertically, nose tip (33)
    # horizontally, with a quarter-radius margin
    clip_min_h = landmark_data_clip[:, 29, 1] - radius_max
    clip_max_h = landmark_data_clip[:, 29, 1] + radius_max * 2 + radius_max_1_4
    clip_min_w = landmark_data_clip[:, 33, 0] - radius_max - radius_max_1_4
    clip_max_w = landmark_data_clip[:, 33, 0] + radius_max + radius_max_1_4
    if min(clip_min_h.tolist() + clip_min_w.tolist()) < 0:
        return False, None
    elif max(clip_max_h.tolist()) > video_h:
        return False, None
    elif max(clip_max_w.tolist()) > video_w:
        return False, None
    elif max(radius_clip) > min(radius_clip) * 1.5:
        # face size changes too much within the clip
        return False, None
    else:
        return True, radius_max

def get_landmarks(im, idx=0):
    """Detect a face in *im* and return its 68 landmarks as an (68, 2) int array.

    Uses the module-level dlib ``detector`` / ``predictor``. When several
    faces are found, the ``idx``-th detection is used; returns None when
    no face is detected.

    BUGFIX: the original indexed ``rects[0]`` before checking for an empty
    detection list, raising IndexError instead of returning None.
    """
    rects = detector(im, 0)
    if len(rects) == 0:
        return None
    rect = rects[idx] if len(rects) > 1 else rects[0]
    # the CNN detector returns mmod_rectangle objects wrapping a .rect
    if hasattr(rect, 'rect'):
        rect = rect.rect
    return np.array([[p.x, p.y] for p in predictor(im, rect).parts()])

def extract_frames_from_video(video_path, save_dir):
    """Decode *video_path* at 25 fps, save frames as jpgs and detect landmarks.

    Each frame is written to ``save_dir/<index>.jpg`` (zero-padded, 6 digits)
    and passed through ``get_landmarks``.

    Returns:
        ((width, height), fps, landmarks) where landmarks has shape
        (n_frames, 68, 2).

    Raises:
        ValueError: when the video yields no frames or a frame contains
            no detectable face (previously this surfaced later as a
            cryptic ``np.stack`` failure).
    """
    vcap = load_video_by_frame(video_path, refps=25)
    video_landmark_data = []
    fps = 25
    frame_width = frame_height = None
    for i, frame in enumerate(tqdm(vcap)):
        if i == 0:
            frame_height, frame_width = frame.shape[:2]
        result_path = os.path.join(save_dir, str(i).zfill(6) + '.jpg')
        frame_landmark = get_landmarks(frame)
        if frame_landmark is None:
            raise ValueError('no face detected in frame {} of {}'.format(i, video_path))
        cv2.imwrite(result_path, frame)
        video_landmark_data.append(frame_landmark)
    if not video_landmark_data:
        raise ValueError('no frames decoded from {}'.format(video_path))
    video_landmark_data = np.stack(video_landmark_data, 0)
    return (int(frame_width), int(frame_height)), fps, video_landmark_data

#%% load config
# Inference configuration: model config file, input video/audio paths,
# output directory and the pretrained generator checkpoint.
config = load_config('configs/DINet_frame.yaml')
source_video_path               =   "../DINet/asserts/examples/test1.mp4"
# source_video_path               =   "../DINet/asserts/examples/VID20230515163740.mp4"
driving_audio_path              =   "../DINet/asserts/examples/driving_audio_3.wav"
# driving_audio_path              =   "../DINet/asserts/examples/audio.wav"
deepspeech_model_path           =   "../DINet/asserts/output_graph.pb"
res_video_dir                   =   "../DINet/asserts/inference_result"
pretrained_clip_DINet_path      =   "ckps/DINet/G_DINet_202307270925.pth.227815"
#%% load config from parser
# Network/input dimensions. Audio features run at 50 fps, video at 25 fps,
# i.e. two audio-feature frames per video frame (see the inference loop).
source_channel     =   3
ref_channel        =   15
audio_channel      =   384
mouth_region_size  =   256
fps_audio          =   50
fps_video          =   25
#%% export dlib model
# dlib CNN face detector + 68-point landmark predictor; these module-level
# globals are read by get_landmarks().
detector_path = "../DINet/asserts/mmod_human_face_detector.dat"
predictor_path = "../DINet/asserts/shape_predictor_68_face_landmarks.dat"
detector = dlib.cnn_face_detection_model_v1(detector_path)
predictor = dlib.shape_predictor(predictor_path)
#%% extract frames from source video
# Frames are dumped next to the video, in a directory named after it.
video_frame_dir = source_video_path.replace('.mp4', '')
if not os.path.exists(video_frame_dir):
    os.mkdir(video_frame_dir)
video_size, _, video_landmark_data = extract_frames_from_video(source_video_path, video_frame_dir)
# Whisper 'tiny' encoder supplies the 384-dim audio features.
model = whisper.load_model('tiny').eval()
audio = whisper.load_audio(driving_audio_path)
# split into 480000-sample chunks — presumably 30 s at Whisper's 16 kHz
# sample rate (its fixed encoder window); TODO confirm
audios = split_array(audio, 480000)
mels = [whisper.log_mel_spectrogram(audio) for audio in audios]
#%%
# Encode every mel chunk and concatenate along time -> (T_audio, 384).
with torch.no_grad():
    embeds = [whisper_encode(model.encoder, mel[None, ...].cuda()) for mel in mels]
    ds_feature = torch.cat(embeds, dim=1).squeeze(0).cpu().numpy()
# number of output video frames implied by the audio length (50 -> 25 fps)
res_frame_length = int(ds_feature.shape[0] / fps_audio * fps_video)
# edge-pad two feature frames on each side for the sliding-window lookup
ds_feature_padding = np.pad(ds_feature, ((2, 2), (0, 0)), mode='edge')
#%% align frame with driving audio
# Build a cycled (forward + reversed) frame list so a short video can cover
# audio of any length, then pad two frames on each side to mirror the
# audio-feature padding.
video_frame_path_list = glob.glob(os.path.join(video_frame_dir, '*.jpg'))
if len(video_frame_path_list) != video_landmark_data.shape[0]:
    # BUGFIX: `raise ('...')` raises a plain str, which is itself a
    # TypeError ("exceptions must derive from BaseException")
    raise ValueError('video frames are misaligned with detected landmarks')
video_frame_path_list.sort()
video_frame_path_list_cycle = video_frame_path_list + video_frame_path_list[::-1]
video_landmark_data_cycle = np.concatenate([video_landmark_data, np.flip(video_landmark_data, 0)], 0)
video_frame_path_list_cycle_length = len(video_frame_path_list_cycle)
if video_frame_path_list_cycle_length >= res_frame_length:
    # cycle already long enough: truncate
    res_video_frame_path_list = video_frame_path_list_cycle[:res_frame_length]
    res_video_landmark_data = video_landmark_data_cycle[:res_frame_length, :, :]
else:
    # repeat the cycle and append the remainder
    divisor = res_frame_length // video_frame_path_list_cycle_length
    remainder = res_frame_length % video_frame_path_list_cycle_length
    res_video_frame_path_list = video_frame_path_list_cycle * divisor + video_frame_path_list_cycle[:remainder]
    res_video_landmark_data = np.concatenate([video_landmark_data_cycle] * divisor + [video_landmark_data_cycle[:remainder, :, :]], 0)
res_video_frame_path_list_pad = [video_frame_path_list_cycle[0]] * 2 \
                                + res_video_frame_path_list \
                                + [video_frame_path_list_cycle[-1]] * 2
res_video_landmark_data_pad = np.pad(res_video_landmark_data, ((2, 2), (0, 0), (0, 0)), mode='edge')
pad_length = ds_feature_padding.shape[0]
#%% randomly select 5 reference images
# Five random frames are cropped around the mouth and stacked channel-wise
# (5 x 3 = 15 channels, matching ref_channel) as the identity reference.
print('selecting five reference images')
ref_img_list = []
resize_w = int(mouth_region_size + mouth_region_size // 4)
resize_h = int((mouth_region_size // 2) * 3 + mouth_region_size // 8)
ref_index_list = random.sample(range(5, len(res_video_frame_path_list_pad) - 2), 5)
for ref_index in ref_index_list:
    # radius from the 5-frame landmark window ending at ref_index
    crop_flag, crop_radius = compute_crop_radius(video_size, res_video_landmark_data_pad[ref_index - 5:ref_index, :, :])
    if not crop_flag:
        # BUGFIX: raising a plain string is a TypeError; use a real exception
        raise ValueError('our method can not handle videos with large change of facial size!!')
    crop_radius_1_4 = crop_radius // 4
    # BGR -> RGB; ref_index - 3 centres the frame within the window
    ref_img = cv2.imread(res_video_frame_path_list_pad[ref_index - 3])[:, :, ::-1]
    ref_landmark = res_video_landmark_data_pad[ref_index - 3, :, :]
    # crop anchored at nose bridge (29) / nose tip (33), same box as inference
    ref_img_crop = ref_img[
                ref_landmark[29, 1] - crop_radius:ref_landmark[29, 1] + crop_radius * 2 + crop_radius_1_4,
                ref_landmark[33, 0] - crop_radius - crop_radius_1_4:ref_landmark[33, 0] + crop_radius + crop_radius_1_4,
                :]
    ref_img_crop = cv2.resize(ref_img_crop, (resize_w, resize_h))
    ref_img_crop = ref_img_crop / 255.0
    ref_img_list.append(ref_img_crop)
ref_video_frame = np.concatenate(ref_img_list, 2)
# (1, 15, H, W) float tensor on GPU
ref_img_tensor = torch.from_numpy(ref_video_frame).permute(2, 0, 1).unsqueeze(0).float().cuda()
#%% load pretrained model weight
print('loading pretrained model from: {}'.format(pretrained_clip_DINet_path))
model = DINet(config)
if not os.path.exists(pretrained_clip_DINet_path):
    raise ('wrong path of pretrained model weight: {}'.format(pretrained_clip_DINet_path))
model.load(pretrained_clip_DINet_path)
model = model.no_grad_eval().cuda()
#%% inference frame by frame
if not os.path.exists(res_video_dir):
    os.mkdir(res_video_dir)
res_video_path = os.path.join(res_video_dir,os.path.basename(source_video_path)[:-4] + '_facial_dubbing.mp4')
if os.path.exists(res_video_path):
    os.remove(res_video_path)
res_face_path = res_video_path.replace('_facial_dubbing.mp4', '_synthetic_face.mp4')
if os.path.exists(res_face_path):
    os.remove(res_face_path)
videowriter = cv2.VideoWriter(res_video_path, cv2.VideoWriter_fourcc(*'XVID'), 25, video_size)
videowriter_face = cv2.VideoWriter(res_face_path, cv2.VideoWriter_fourcc(*'XVID'), 25, (resize_w, resize_h))
# init a timer
start_time = time.time()
all_landmarks = []
all_radius = []
landmarks_cache = []
frame_data_cache = []
#%%
for clip_end_index in range(5, res_frame_length):
    print('synthesizing {}/{} frame'.format(clip_end_index+1, res_frame_length))
    frame_data = cv2.imread(res_video_frame_path_list_pad[clip_end_index - 3])[:, :, ::-1]
    landmarks_stack = res_video_landmark_data_pad[clip_end_index - 5:clip_end_index, :, :]
    frame_landmark = np.round(landmarks_stack.mean(axis=0)).astype(int)
    # get radius of crop region
    crop_flag, crop_radius = compute_crop_radius(video_size,
                                                 landmarks_stack,
                                                 random_scale = 1.05)
    all_radius.append(crop_radius)
    if not crop_flag:
        raise ('our method can not handle videos with large change of facial size!!')
    crop_radius_1_4 = crop_radius // 4
    # crop the face
    crop_frame_data = frame_data[
                        frame_landmark[29, 1] - crop_radius:frame_landmark[29, 1] + crop_radius * 2 + crop_radius_1_4,
                        frame_landmark[33, 0] - crop_radius - crop_radius_1_4:frame_landmark[33, 0] + crop_radius +crop_radius_1_4,
                        :]
    crop_frame_h,crop_frame_w = crop_frame_data.shape[0],crop_frame_data.shape[1]
    crop_frame_data = cv2.resize(crop_frame_data, (resize_w,resize_h))  # [32:224, 32:224, :]
    crop_frame_data = crop_frame_data / 255.0
    # mask mouth region
    crop_frame_data[mouth_region_size//2:mouth_region_size//2 + mouth_region_size,
                    mouth_region_size//8:mouth_region_size//8 + mouth_region_size, :] = 0

    crop_frame_tensor = torch.from_numpy(crop_frame_data).float().cuda().permute(2, 0, 1).unsqueeze(0)
    # deepspeech_tensor = torch.from_numpy(ds_feature_padding[clip_end_index - 5:clip_end_index, :]).permute(1, 0).unsqueeze(0).float().cuda()
    frame_audio_idx = int(clip_end_index * fps_audio // fps_video)
    deepspeech_tensor = torch.from_numpy(ds_feature_padding[max(frame_audio_idx - 10, 0):frame_audio_idx, :]).permute(1, 0).unsqueeze(0).float().cuda()
    # deepspeech tensor shape [1, 29, 5]
    # deepspeech_tensor = torch.zeros(1, 29, 5).cuda()
    with torch.no_grad():
        pre_frame = model(crop_frame_tensor, ref_img_tensor, deepspeech_tensor)
        pre_frame = pre_frame.squeeze(0).permute(1, 2, 0).detach().cpu().numpy() * 255
    pre_frame_resize = cv2.resize(pre_frame, (crop_frame_w,crop_frame_h))
    frame_data[
    frame_landmark[29, 1] - crop_radius:
    frame_landmark[29, 1] + crop_radius * 2,
    frame_landmark[33, 0] - crop_radius - crop_radius_1_4:
    frame_landmark[33, 0] + crop_radius + crop_radius_1_4,
    :] = pre_frame_resize[:crop_radius * 3,:,:]
    # break
    videowriter_face.write(pre_frame[:, :, ::-1].copy().astype(np.uint8))
    videowriter.write(frame_data[:, :, ::-1])
videowriter.release()
videowriter_face.release()
# finish timer
end_time = time.time()
print('total time cost: {}s'.format(end_time - start_time))
print('frame per second {}s'.format((pad_length - 5) / (end_time - start_time)))
#%% add audio to synthetic video
# Mux the driving audio onto the synthesised video (video stream copied,
# audio re-encoded to AAC).
video_add_audio_path = res_video_path.replace('.mp4', '_add_audio.mp4')
if os.path.exists(video_add_audio_path):
    os.remove(video_add_audio_path)
# BUGFIX/security: build an argv list with shell=False instead of a
# string-formatted shell command, so paths containing spaces or shell
# metacharacters cannot break (or inject into) the command line.
cmd = [
    'ffmpeg', '-i', res_video_path, '-i', driving_audio_path,
    '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental',
    '-map', '0:v:0', '-map', '1:a:0', video_add_audio_path,
]
subprocess.call(cmd)







