import torch
import numpy as np
import json
import random
import cv2, os
from glob import glob
import pandas as pd

from torch.utils.data import Dataset
# Prefer the project's BaseDataset when ezds is installed; otherwise fall
# back to the plain torch Dataset so this module stays importable.
try:
    from ezds.ezdlearn.data import BaseDataset
except ImportError:  # narrow catch: only a missing package, not arbitrary errors
    BaseDataset = Dataset

def get_image_list(data_root, split, filelists='preprocess_data/{}.txt'):
    """Read the file list for *split* and return joined sample paths.

    Parameters
    ----------
    data_root : str
        Directory prefix prepended to every relative path in the list file.
    split : str
        Substituted into *filelists* (e.g. ``'train'`` or ``'validation'``).
    filelists : str
        Format string with one ``{}`` placeholder for the split name.

    Returns
    -------
    list[str]
        One ``os.path.join(data_root, entry)`` per non-empty line. When a
        line holds several whitespace-separated fields, only the first is
        kept (the rest are treated as labels/metadata).
    """
    paths = []
    with open(filelists.format(split)) as f:
        for raw in f:
            entry = raw.strip()
            if not entry:
                # Skip blank lines instead of emitting a bare data_root path.
                continue
            entry = entry.split()[0]  # first field only
            paths.append(os.path.join(data_root, entry))
    return paths

def read_faces(image_path, landmarks, radius, radius_1_4, resize=None):
    """Load, crop, and normalize one or more face images.

    Each image is read with OpenCV, cropped via ``crop_face`` using its
    landmark (or passed through unchanged when landmarks is None),
    optionally resized, converted BGR -> RGB, and scaled to [0, 1].

    Returns an ``np.ndarray`` of shape (N, H, W, 3).
    """
    paths = [image_path] if isinstance(image_path, str) else image_path
    if landmarks is None:
        landmarks = [None] * len(paths)

    faces = []
    for path, lm in zip(paths, landmarks):
        face = crop_face(cv2.imread(path), lm, radius, radius_1_4)
        if resize:
            face = cv2.resize(face, resize)
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) / 255.0
        faces.append(face)
    return np.stack(faces, 0)

def crop_face(img, landmark, crop_radius=128, crop_radius_1_4=32):
    """Crop a face region anchored at the nose landmarks.

    The window is centered horizontally on landmark 33 (x) and starts
    ``crop_radius`` above landmark 29 (y), producing a crop of height
    ``3*crop_radius + crop_radius_1_4`` and width
    ``2*(crop_radius + crop_radius_1_4)`` (clipped at the top/left image
    borders) — matching ``img_h``/``img_w`` in ``DINetDataset``.

    Parameters
    ----------
    img : np.ndarray
        HxWxC image.
    landmark : np.ndarray or None
        (68, 2) array of (x, y) points; ``None`` returns *img* unchanged.
    crop_radius, crop_radius_1_4 : int
        Half the mouth-region size and a quarter of that radius.

    Returns
    -------
    np.ndarray
        The cropped view, or the original image when landmark is None.
    """
    if landmark is None:
        return img
    # Cast anchors to int so float landmark arrays (e.g. read from CSV)
    # still slice cleanly — numpy float indices raise in a slice.
    cy = int(landmark[29, 1])
    cx = int(landmark[33, 0])
    y0 = max(cy - crop_radius, 0)
    y1 = cy + crop_radius * 2 + crop_radius_1_4
    x0 = max(cx - crop_radius - crop_radius_1_4, 0)
    x1 = cx + crop_radius + crop_radius_1_4
    return img[y0:y1, x0:x1, :]

class DINetDataset(BaseDataset):
    """Dataset of video clips + audio features for DINet training.

    Each sample directory (one per video) must contain numbered ``*.jpg``
    frames and an ``audio.pkl`` with an ``'embed'`` tensor of per-step
    audio features (50 steps/s against 25 video fps, so 2 audio steps
    per frame).

    ``__getitem__`` returns a 5-tuple of float tensors:
      source_images      (clip_length, 3, img_h, img_w)
      source_image_mask  same shape, mouth region zeroed
      reference_images   (clip_length, 5*3, img_h, img_w)
      audio_feature_clip (clip_length, feat_dim, clip_length*2)
      audio_feature_full (feat_dim, 2*clip_length*2)
    """

    def __init__(self, config, is_train=True):
        # The ezds BaseDataset takes the config; the plain torch Dataset
        # fallback (see module top) takes no arguments.
        try:
            super(DINetDataset, self).__init__(config)
        except TypeError:
            super(DINetDataset, self).__init__()
        self.mouth_region_size = getattr(config, 'mouth_region_size', 256)
        self.radius = self.mouth_region_size // 2
        self.radius_1_4 = self.radius // 4
        # Face-crop geometry — must match crop_face(): 3r + r/4 tall,
        # 2*(r + r/4) wide.
        self.img_h = self.radius * 3 + self.radius_1_4
        self.img_w = self.radius * 2 + self.radius_1_4 * 2
        self.dirs = get_image_list(config.data_root,
                                   'train' if is_train else 'validation',
                                   config.filelists)
        # Optional `length` caps the epoch size and switches on random
        # sampling. Default it to None: the previous two-arg getattr raised
        # AttributeError whenever the config omitted `length`.
        length = getattr(config, 'length', None)
        self.is_shuffle = bool(length)
        self.length = length or len(self.dirs)
        self.clip_length = config.clip_length
        self.preindex = self.clip_length // 2          # frames before center
        self.postindex = self.clip_length - self.preindex  # frames after center
        self.fps_video = 25
        self.fps_audio = 50

    def __getitem__(self, index):
        if self.is_shuffle:
            # Pick a uniformly random video. randrange's upper bound is
            # exclusive, fixing the off-by-one of randint(0, self.length);
            # the modulo wraps a configured length larger than the corpus.
            index = random.randrange(self.length) % len(self.dirs)
        pre, post = self.preindex, self.postindex
        radius, radius_1_4 = self.radius, self.radius_1_4
        audio_mult = self.fps_audio / self.fps_video  # audio steps per frame
        resize = self.img_w, self.img_h
        video_name = self.dirs[index]

        audio_feature = torch.load(os.path.join(video_name, 'audio.pkl'))['embed']
        video_frames = glob(os.path.join(video_name, '*.jpg'))
        # Sort numerically by frame number ("10.jpg" after "9.jpg").
        video_frames = sorted(video_frames, key=lambda x: int(os.path.basename(x.replace('.jpg', ''))))
        # Usable clip count is bounded by whichever modality is shorter.
        video_clip_num = min(len(video_frames), int(len(audio_feature) / audio_mult))
        # Keep a 2*pre / 2*post margin so the doubled-length audio window
        # below stays in range.
        source_index = random.randrange(2 * pre, video_clip_num - 2 * post)
        source_slice = slice(source_index - pre, source_index + post)
        source_image_path_list = video_frames[source_slice]

        # Five random reference frames from anywhere in the clip range.
        reference_index = random.sample(range(pre, video_clip_num - post), 5)
        reference_image_path_list = [video_frames[idx] for idx in reference_index]

        # NOTE(review): landmark-based cropping is disabled (landmarks=None),
        # so frames are used uncropped — presumably pre-cropped upstream.
        source_images = read_faces(source_image_path_list, None, radius, radius_1_4, resize)
        source_images = source_images.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        reference_images = read_faces(reference_image_path_list, None, radius, radius_1_4, resize)
        # Stack the 5 references along channels, then repeat per clip frame.
        reference_images = reference_images.transpose(0, 3, 1, 2).reshape(-1, self.img_h, self.img_w)
        reference_images = np.stack([reference_images] * self.clip_length, 0)

        # Mask out the mouth region of the source frames (inpainting target).
        source_image_mask = source_images.copy()
        source_image_mask[..., self.radius:     self.radius + self.mouth_region_size,
                               self.radius_1_4: self.radius_1_4 + self.mouth_region_size] = 0

        # Per-frame audio windows (clip_length frames worth of context each).
        audio_feature_clip = []
        for video_index in range(source_index - pre, source_index + post):
            audio_index = int(video_index * audio_mult)
            audio_feature_clip.append(
                audio_feature[audio_index - int(pre * audio_mult):audio_index + int(post * audio_mult), :])
        audio_feature_clip = torch.stack(audio_feature_clip, 0)
        # One wide window covering twice the clip span, centered on the clip.
        audio_feature_full = audio_feature[int((source_index - 2 * pre) * audio_mult)
                                          :int((source_index + 2 * post) * audio_mult), :]

        # Convert to float tensors; audio goes channels-first.
        source_images = torch.from_numpy(source_images).float()
        source_image_mask = torch.from_numpy(source_image_mask).float()
        reference_images = torch.from_numpy(reference_images).float()
        audio_feature_clip = audio_feature_clip.float().permute(0, 2, 1)
        audio_feature_full = audio_feature_full.float().permute(1, 0)
        return source_images, source_image_mask, reference_images, audio_feature_clip, audio_feature_full

    def __len__(self):
        return self.length

Dataset = DINetDataset



#%%
if __name__ == "__main__":
    # Smoke test: build the training dataset from the frame config and pull a
    # single sample. Requires the ezds package plus the preprocessed data and
    # config file referenced below to exist on disk.
    # NOTE(review): `vt` is imported but unused here — presumably kept for
    # interactive visual debugging; confirm before removing.
    from ezds.ezdlearn.utils import visual as vt
    from ezds.ezdlearn.config import load_config
    config = load_config('configs/DINet_frame.yaml')
    # radius = config.mouth_region_size//2
    # pre, post = 2, 3
    # audio_mult = 2
    # radius_1_4 = radius//4
    # img_w = radius * 2 + radius_1_4 * 2
    # img_h = radius * 3 + radius_1_4
    # resize = img_w, img_h
    # is_train = False
    # dirs = get_image_list(config.data_root ,'train' if is_train else 'validation', config.filelists)
    # video_name = dirs[0]
    dset = DINetDataset(config, is_train=True)
    # Iterate once to exercise __getitem__ end to end.
    for d in dset:
        break