#%%
from ezds.ezdlearn.config import load_config
from models.DINet import DINet
import time, dlib

import numpy as np
import glob
import os
import cv2, csv
import torch
import subprocess
import random
from collections import OrderedDict

#%%
from ezds.ezdlearn.utils import visual as vt
import whisper
from torch.nn import functional as F

#%%

def load_landmark_openface(csv_path):
    '''
    Load 68-point OpenFace landmarks from a .csv file.

    Args:
        csv_path: path to an OpenFace CSV whose rows carry the frame number
            in column 0, the 68 x coordinates in columns 5..72 and the 68
            y coordinates in columns 73..140 (header row is skipped).

    Returns:
        Array of shape (frames, 68, 2) with (x, y) coordinates, or None
        when the frame numbers are not the contiguous sequence 1..N.
    '''
    with open(csv_path, 'r') as f:
        rows = list(csv.reader(f))
    xs = []
    ys = []
    # skip the header row; frame numbers must run 1..N without gaps
    for expected, row in enumerate(rows[1:], start=1):
        if int(float(row[0])) != expected:
            return None
        xs.append([float(v) for v in row[5:5 + 68]])
        ys.append([float(v) for v in row[5 + 68:5 + 68 + 68]])
    return np.stack([np.array(xs), np.array(ys)], 2)

def whisper_encode(model, x):
    x = F.gelu(model.conv1(x))
    x = F.gelu(model.conv2(x))
    x = x.permute(0, 2, 1)
    pe = model.positional_embedding[:x.shape[1], :]
    x = (x + pe).to(x.dtype)
    for block in model.blocks:
        x = block(x)
    x = model.ln_post(x)
    return x

def split_array(arr, split_size):
    '''
    Split ``arr`` into consecutive chunks of ``split_size`` elements.

    The last chunk keeps whatever remainder is left; when ``arr`` divides
    evenly, the trailing empty chunk is dropped. Arrays shorter than
    ``split_size`` come back as a single chunk.
    '''
    n_chunks = max(len(arr) // split_size, 1)
    boundaries = [split_size * k for k in range(1, n_chunks + 1)]
    chunks = np.array_split(arr, boundaries)
    # an exact multiple leaves a trailing empty chunk; drop it
    return chunks[:-1] if len(chunks[-1]) == 0 else chunks

def compute_crop_radius(video_size, landmark_data_clip, random_scale=None):
    '''
    Decide whether a face clip can be cropped and compute the crop radius.

    Args:
        video_size: (width, height) of the source video frames.
        landmark_data_clip: array of shape (frames, 68, 2) holding the
            (x, y) landmark coordinates of each frame in the clip.
        random_scale: fixed scale factor for the radius; when None a random
            scale in [1.05, 1.15) is drawn (augmentation during training).

    Returns:
        (True, radius) when every frame's crop window fits inside the video
        and the face size is stable across the clip, else (False, None).
    '''
    video_w, video_h = video_size[0], video_size[1]
    landmark_max_clip = np.max(landmark_data_clip, axis=1)
    if random_scale is None:
        random_scale = random.random() / 10 + 1.05
    # vertical radius: lowest landmark to nose tip (landmark 29) per frame
    radius_h = (landmark_max_clip[:, 1] - landmark_data_clip[:, 29, 1]) * random_scale
    # horizontal radius: mouth-corner width (landmarks 48 -> 54) per frame
    radius_w = (landmark_data_clip[:, 54, 0] - landmark_data_clip[:, 48, 0]) * random_scale
    radius_clip = np.max(np.stack([radius_h, radius_w], 1), 1) // 2
    radius_max = np.max(radius_clip)
    # round up to a multiple of 4; np.int was removed in NumPy 1.24, so use
    # the builtin int() (identical truncation behavior)
    radius_max = (int(radius_max / 4) + 1) * 4
    radius_max_1_4 = radius_max // 4
    # crop window is anchored on nose landmarks 29 (y) and 33 (x)
    clip_min_h = landmark_data_clip[:, 29, 1] - radius_max
    clip_max_h = landmark_data_clip[:, 29, 1] + radius_max * 2 + radius_max_1_4
    clip_min_w = landmark_data_clip[:, 33, 0] - radius_max - radius_max_1_4
    clip_max_w = landmark_data_clip[:, 33, 0] + radius_max + radius_max_1_4
    # reject clips whose crop window would leave the frame
    if min(clip_min_h.tolist() + clip_min_w.tolist()) < 0:
        return False, None
    if max(clip_max_h.tolist()) > video_h:
        return False, None
    if max(clip_max_w.tolist()) > video_w:
        return False, None
    # reject clips whose face size changes too much across frames
    if max(radius_clip) > min(radius_clip) * 1.5:
        return False, None
    return True, radius_max

def get_landmarks(im, idx=0):
    '''
    Detect a face in ``im`` and return its 68 landmark points.

    Args:
        im: image array understood by the dlib detector.
        idx: index of the face to use when several faces are detected.

    Returns:
        Array of shape (68, 2) with (x, y) landmark coordinates, or None
        when no face is detected.
    '''
    rects = detector(im, 0)
    # check for "no face" BEFORE indexing; the original read rects[0]
    # first, which raised IndexError on frames without a detected face
    if len(rects) == 0:
        return None
    rect = rects[idx] if len(rects) > 1 else rects[0]
    # cnn_face_detection_model_v1 returns mmod_rectangle objects that wrap
    # the plain dlib rectangle in a .rect attribute
    if hasattr(rect, 'rect'):
        rect = rect.rect
    return np.array([[p.x, p.y] for p in predictor(im, rect).parts()])

def extract_frames_from_video(video_path, save_dir):
    '''
    Dump every frame of ``video_path`` as a zero-padded .jpg into ``save_dir``.

    Args:
        video_path: path of the input video file.
        save_dir: existing directory that receives the frame images.

    Returns:
        ((width, height), fps) of the source video.
    '''
    videoCapture = cv2.VideoCapture(video_path)
    try:
        fps = videoCapture.get(cv2.CAP_PROP_FPS)
        if int(fps) != 25:
            print('warning: the input video is not 25 fps, it would be better to trans it to 25 fps!')
        frames = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
        frame_height = videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        frame_width = videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)
        for i in range(int(frames)):
            ret, frame = videoCapture.read()
            # CAP_PROP_FRAME_COUNT is only an estimate; stop when a read
            # fails instead of passing None to imwrite (original crashed)
            if not ret:
                break
            result_path = os.path.join(save_dir, str(i).zfill(6) + '.jpg')
            cv2.imwrite(result_path, frame)
    finally:
        # release the capture handle (the original leaked it)
        videoCapture.release()
    return (int(frame_width), int(frame_height)), fps

#%% load config
config = load_config('configs/DINet_frame.yaml')
# input assets: source talking-head video, its OpenFace landmark CSV, the
# driving audio, and the pretrained DINet generator checkpoint
source_video_path               =   "../DINet/asserts/examples/test1.mp4"
source_openface_landmark_path   =   "../DINet/asserts/examples/test1.csv"
driving_audio_path              =   "../DINet/asserts/examples/driving_audio_3.wav"
# NOTE(review): deepspeech_model_path appears unused below — audio features
# come from whisper in this script; confirm before removing
deepspeech_model_path           =   "../DINet/asserts/output_graph.pb"
res_video_dir                   =   "../DINet/asserts/inference_result"
pretrained_clip_DINet_path      =   "ckps/DINet/G_DINet_202307231459.pth.375"
#%% inference hyper-parameters (in place of command-line args)
source_channel     =   3    # RGB source frame
ref_channel        =   15   # 5 reference frames x 3 channels (see ref concat below)
audio_channel      =   384  # audio feature dim (presumably whisper-tiny width — confirm)
mouth_region_size  =   256  # side length of the masked mouth square
fps_audio          =   50   # audio feature frames per second (used to align below)
fps_video          =   25   # expected video frame rate
#%% load dlib face detector and 68-point landmark predictor
detector_path = "../DINet/asserts/mmod_human_face_detector.dat"
predictor_path = "../DINet/asserts/shape_predictor_68_face_landmarks.dat"
detector = dlib.cnn_face_detection_model_v1(detector_path)
predictor = dlib.shape_predictor(predictor_path)
#%% extract frames from source video
# frames are written as .jpg files into a directory named after the video
video_frame_dir = source_video_path.replace('.mp4', '')
if not os.path.exists(video_frame_dir):
    os.mkdir(video_frame_dir)
video_size, _ = extract_frames_from_video(source_video_path, video_frame_dir)
# encode the driving audio with the whisper-tiny encoder
model = whisper.load_model('tiny').eval()
audio = whisper.load_audio(driving_audio_path)
# split into 480000-sample chunks (30 s assuming whisper's 16 kHz — confirm)
# so each chunk fits the encoder's context window
audios = split_array(audio, 480000)
mels = [whisper.log_mel_spectrogram(audio) for audio in audios]
#%% run the encoder chunk-by-chunk and concatenate along time
with torch.no_grad():
    embeds = [whisper_encode(model.encoder, mel[None, ...].cuda()) for mel in mels]
    ds_feature = torch.cat(embeds, dim=1).squeeze(0).cpu().numpy()
# number of output video frames implied by the audio length
# (fps_audio audio features per second vs fps_video video frames)
res_frame_length = int(ds_feature.shape[0] / fps_audio * fps_video)
# pad 2 feature frames on each side for the sliding audio window
ds_feature_padding = np.pad(ds_feature, ((2, 2), (0, 0)), mode='edge')
#%% align frame with driving audio
# np.int was removed in NumPy 1.24 — use the builtin int dtype instead
video_landmark_data = load_landmark_openface(source_openface_landmark_path).astype(int)
video_frame_path_list = glob.glob(os.path.join(video_frame_dir, '*.jpg'))
if len(video_frame_path_list) != video_landmark_data.shape[0]:
    # `raise 'str'` is a TypeError in Python 3; raise a real exception
    raise ValueError('video frames are misaligned with detected landmarks')
video_frame_path_list.sort()
# cycle the video forward then backward so any audio length can be covered
# without a visible jump at the loop point
video_frame_path_list_cycle = video_frame_path_list + video_frame_path_list[::-1]
video_landmark_data_cycle = np.concatenate([video_landmark_data, np.flip(video_landmark_data, 0)], 0)
video_frame_path_list_cycle_length = len(video_frame_path_list_cycle)
if video_frame_path_list_cycle_length >= res_frame_length:
    res_video_frame_path_list = video_frame_path_list_cycle[:res_frame_length]
    res_video_landmark_data = video_landmark_data_cycle[:res_frame_length, :, :]
else:
    # repeat the cycled video enough times, then take the remainder
    divisor = res_frame_length // video_frame_path_list_cycle_length
    remainder = res_frame_length % video_frame_path_list_cycle_length
    res_video_frame_path_list = video_frame_path_list_cycle * divisor + video_frame_path_list_cycle[:remainder]
    res_video_landmark_data = np.concatenate([video_landmark_data_cycle] * divisor + [video_landmark_data_cycle[:remainder, :, :]], 0)
# pad 2 frames on each side to mirror the audio-feature padding above
res_video_frame_path_list_pad = [video_frame_path_list_cycle[0]] * 2 \
                                + res_video_frame_path_list \
                                + [video_frame_path_list_cycle[-1]] * 2
res_video_landmark_data_pad = np.pad(res_video_landmark_data, ((2, 2), (0, 0), (0, 0)), mode='edge')
pad_length = ds_feature_padding.shape[0]
#%% randomly select 5 reference images
print('selecting five reference images')
ref_img_list = []
# network input size for the face crop
resize_w = int(mouth_region_size + mouth_region_size // 4)
resize_h = int((mouth_region_size // 2) * 3 + mouth_region_size // 8)
ref_index_list = random.sample(range(5, len(res_video_frame_path_list_pad) - 2), 5)
for ref_index in ref_index_list:
    # radius is computed on the 5-frame window preceding the reference frame
    crop_flag, crop_radius = compute_crop_radius(video_size, res_video_landmark_data_pad[ref_index - 5:ref_index, :, :])
    if not crop_flag:
        # `raise 'str'` is a TypeError in Python 3; raise a real exception
        raise ValueError('our method can not handle videos with large change of facial size!!')
    crop_radius_1_4 = crop_radius // 4
    # BGR -> RGB
    ref_img = cv2.imread(res_video_frame_path_list_pad[ref_index - 3])[:, :, ::-1]
    ref_landmark = res_video_landmark_data_pad[ref_index - 3, :, :]
    # crop around nose landmarks 29 (y) / 33 (x), same window as
    # compute_crop_radius validated
    ref_img_crop = ref_img[
        ref_landmark[29, 1] - crop_radius:ref_landmark[29, 1] + crop_radius * 2 + crop_radius_1_4,
        ref_landmark[33, 0] - crop_radius - crop_radius_1_4:ref_landmark[33, 0] + crop_radius + crop_radius_1_4,
        :]
    ref_img_crop = cv2.resize(ref_img_crop, (resize_w, resize_h))
    ref_img_crop = ref_img_crop / 255.0
    ref_img_list.append(ref_img_crop)
# stack the 5 RGB crops channel-wise -> (H, W, 15), matching ref_channel
ref_video_frame = np.concatenate(ref_img_list, 2)
ref_img_tensor = torch.from_numpy(ref_video_frame).permute(2, 0, 1).unsqueeze(0).float().cuda()
#%% load pretrained model weight
print('loading pretrained model from: {}'.format(pretrained_clip_DINet_path))
# NOTE: rebinds `model` from the whisper model above to the DINet generator
model = DINet(config)
if not os.path.exists(pretrained_clip_DINet_path):
    # `raise 'str'` is a TypeError in Python 3; raise a real exception
    raise FileNotFoundError('wrong path of pretrained model weight: {}'.format(pretrained_clip_DINet_path))
model.load(pretrained_clip_DINet_path)
model = model.no_grad_eval().cuda()
#%% inference frame by frame
if not os.path.exists(res_video_dir):
    os.mkdir(res_video_dir)
res_video_path = os.path.join(res_video_dir,os.path.basename(source_video_path)[:-4] + '_facial_dubbing.mp4')
# remove stale outputs from a previous run
if os.path.exists(res_video_path):
    os.remove(res_video_path)
res_face_path = res_video_path.replace('_facial_dubbing.mp4', '_synthetic_face.mp4')
if os.path.exists(res_face_path):
    os.remove(res_face_path)

# caches used by the per-frame loop below: landmark history for temporal
# smoothing, and a short frame queue introducing a fixed render delay
all_landmarks = []
all_radius = []
landmarks_cache = []
frame_data_cache = []
#%% per-frame preparation loop
for clip_end_index in range(res_frame_length):
    print('synthesizing {}/{} frame'.format(clip_end_index+1, res_frame_length))
    # BGR -> RGB
    frame_data = cv2.imread(res_video_frame_path_list_pad[clip_end_index])[:, :, ::-1]
    frame_landmark = get_landmarks(frame_data)
    # keep the last 5 landmark sets for temporal smoothing
    landmarks_cache.append(frame_landmark)
    all_landmarks.append(frame_landmark)
    if len(landmarks_cache) > 5:
        landmarks_cache.pop(0)
    # delay the rendered frame by 3 so the smoothed landmarks are roughly
    # centered on the frame being rendered
    frame_data_cache.append(frame_data)
    if len(frame_data_cache) > 3:
        frame_data = frame_data_cache.pop(0)
    if clip_end_index < 5:
        continue
    # average the cached landmarks to reduce per-frame jitter
    landmarks_stack = np.stack(landmarks_cache, 0)
    frame_landmark = np.round(landmarks_stack.mean(axis=0)).astype(int)
    # get radius of crop region from the smoothed landmark window
    crop_flag, crop_radius = compute_crop_radius(video_size,
                                                 landmarks_stack,
                                                 random_scale=1.05)
    all_radius.append(crop_radius)
    if not crop_flag:
        # `raise 'str'` is a TypeError in Python 3; raise a real exception
        raise ValueError('our method can not handle videos with large change of facial size!!')
    crop_radius_1_4 = crop_radius // 4
    # crop the face around nose landmarks 29 (y) / 33 (x)
    crop_frame_data = frame_data[
        frame_landmark[29, 1] - crop_radius:frame_landmark[29, 1] + crop_radius * 2 + crop_radius_1_4,
        frame_landmark[33, 0] - crop_radius - crop_radius_1_4:frame_landmark[33, 0] + crop_radius + crop_radius_1_4,
        :]
    crop_frame_h, crop_frame_w = crop_frame_data.shape[0], crop_frame_data.shape[1]
    crop_frame_data = cv2.resize(crop_frame_data, (resize_w, resize_h))
    crop_frame_data = crop_frame_data / 255.0
    # zero out the mouth region; the network inpaints it from audio + refs
    crop_frame_data[mouth_region_size//2:mouth_region_size//2 + mouth_region_size,
                    mouth_region_size//8:mouth_region_size//8 + mouth_region_size, :] = 0

    crop_frame_tensor = torch.from_numpy(crop_frame_data).float().cuda().permute(2, 0, 1).unsqueeze(0)
    # take the 10 audio-feature frames (0.2 s at fps_audio=50) ending at
    # the audio position of the current video frame
    frame_audio_idx = int(clip_end_index * fps_audio // fps_video)
    deepspeech_tensor = torch.from_numpy(ds_feature_padding[max(frame_audio_idx - 10, 0):frame_audio_idx, :]).permute(1, 0).unsqueeze(0).float().cuda()
    # NOTE(review): this break stops after preparing a single frame — looks
    # like debugging residue; remove it to synthesize the whole video
    break
# run one DINet forward pass manually, stage by stage (mirrors DINet.forward)
with torch.no_grad():
    source_img, ref_img, audio_feature = crop_frame_tensor, ref_img_tensor, deepspeech_tensor
    ## source image encoder
    source_in_feature = model.source_in_conv(source_img)
    ## reference image encoder
    ref_in_feature = model.ref_in_conv(ref_img)
    ## alignment encoder: pool source+reference features to a vector
    img_para = model.trans_conv(torch.cat([source_in_feature,ref_in_feature],1))
    img_para = model.global_avg2d(img_para).squeeze(3).squeeze(2)
    ## audio encoder: pool audio features to a vector
    audio_para = model.audio_encoder(audio_feature)
    audio_para = model.global_avg1d(audio_para).squeeze(2)
    ## concat alignment feature and audio feature
    trans_para = torch.cat([img_para,audio_para],1)
    ## use AdaAT do spatial deformation on reference feature maps
    ref_trans_feature0 = model.appearance_conv_list[0](ref_in_feature)
    ref_trans_feature1 = model.adaAT(ref_trans_feature0, trans_para)
    ref_trans_feature2 = model.appearance_conv_list[1](ref_trans_feature1)
    ## feature decoder: fuse source and deformed reference features
    merge_feature = torch.cat([source_in_feature, ref_trans_feature2],1)
    out = model.out_conv(merge_feature)

    # back to a HxWx3 uint8-range image array
    pre_frame = out.squeeze(0).permute(1, 2, 0).detach().cpu().numpy() * 255

# resize the synthetic face back to the original crop size and paste it
# into the frame; note the vertical paste region is radius*3 rows, i.e. it
# omits the extra radius/4 margin that was cropped (bottom strip is kept
# from the source frame)
pre_frame_resize = cv2.resize(pre_frame, (crop_frame_w,crop_frame_h))
frame_data[
frame_landmark[29, 1] - crop_radius:
frame_landmark[29, 1] + crop_radius * 2,
frame_landmark[33, 0] - crop_radius - crop_radius_1_4:
frame_landmark[33, 0] + crop_radius + crop_radius_1_4,
:] = pre_frame_resize[:crop_radius * 3,:,:]

#%% quick visualization of the pooled audio feature vector
from matplotlib import pyplot as plt
ap = audio_para.squeeze(0).detach().cpu().numpy()
plt.plot(ap)
plt.show()

#%%






