"""Load DDFADataset."""
import os.path as osp

import mindspore
import numpy as np
import cv2
from pathlib import Path
import mindspore.dataset as ds
from mindspore import Tensor, context
import mindspore.dataset.vision as vision
from mind3d.utils.synergynet_util import _load, Crop, Compose_GT
import mindspore.ops as ops
import mindspore.dataset.transforms as transforms
import cv2


def img_loader(path):
    """Read the image at *path* with OpenCV in 3-channel BGR color."""
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    return image


class DDFADataset:
    """
    A source dataset that reads, parses and augments 3DDFA training data.

    Each sample pairs a cropped face image (loaded with OpenCV, float32)
    with its pre-normalized 3DMM parameter vector taken from row ``index``
    of a pickled parameter matrix. At access time the image is color-jittered,
    converted HWC -> CHW, cropped and normalized; the target vector is
    returned alongside it.

    NOTE(review): the original docstring described AFLW2000 / "300 Indoor and
    300 Outdoor images" (which is the 300W dataset); the ``__main__`` driver
    actually feeds this class 300W-LP ``train_aug_120x120`` data — confirm
    against the data-preparation pipeline.

    Args:
        root (str): Directory containing the cropped training images.
        filelists (str): Text file listing one image filename per line,
            relative to ``root``.
        param_fp (str): Path to the pickled, normalized 3DMM parameter
            matrix (e.g. ``param_all_norm_v201.pkl``); row ``i`` is the
            regression target for image ``i`` of the file list.

    Citation:
        @misc{3ddfa_cleardusk,
                author =       {Guo, Jianzhu and Zhu, Xiangyu and Lei, Zhen},
                title =        {3DDFA},
                howpublished = {github.com/cleardusk/3DDFA},
                year =         {2018}
                    }
    """
    def __init__(self, root, filelists, param_fp):
        self.root = root
        self.lines = Path(filelists).read_text().strip().split("\n")
        self.params = _load(param_fp)
        self.img_loader = img_loader
        # Maximum crop margin in pixels, consumed by Crop and get_params.
        self.maximum = 5
        # Build the augmentation pipeline ONCE here. The original code
        # re-created all five transform objects inside __getitem__, paying
        # the construction cost on every single sample access.
        colorjitter = vision.RandomColorAdjust(0.4, 0.4, 0.4)
        transpose = vision.HWC2CHW()
        crop = Crop(self.maximum, mode='train')
        mean_channel = [127.5]
        std_channel = [128]
        normalize_op = vision.Normalize(mean=mean_channel, std=std_channel)
        self.trans = Compose_GT([colorjitter, transpose, crop, normalize_op])

    def _target_loader(self, index):
        # Row `index` of the parameter matrix is this sample's target.
        return self.params[index]

    def __getitem__(self, index):
        """Return the augmented (img, target) pair for sample `index`."""
        path = osp.join(self.root, self.lines[index])
        data = np.array(self.img_loader(path)).astype(np.float32)
        target = self._target_loader(index).astype(np.float32)
        # Compose_GT applies the pipeline to the image and forwards/updates
        # the ground-truth target in lockstep (e.g. for the crop).
        img, target = self.trans(data, target)
        return img, target

    def get_params(self, img):
        """Return (crop_margin, height, width) for a CHW image array."""
        h = img.shape[1]
        w = img.shape[2]
        # Fixed margin; a randomized variant was left commented by the author.
        crop_margins = self.maximum  # random.randint(0, self.maximum)
        return crop_margins, h, w

    def __len__(self):
        return len(self.lines)


class DDFATestDataset:
    """Minimal evaluation dataset: yields images listed in `filelist`.

    Args:
        filelist (str): Text file with one image filename per line.
        root (str): Directory prefix joined onto every filename.
        transform (callable, optional): Applied to each loaded image
            before it is returned; skipped when ``None``.
    """
    def __init__(self, filelist, root="", transform=None):
        self.root = root
        self.transform = transform
        self.line = Path(filelist).read_text().strip().split("\n")

    def __getitem__(self, index):
        path = osp.join(self.root, self.line[index])
        img = img_loader(path)
        # Bug fix: the original tested `self.gettransform`, an attribute
        # that is never assigned, so every access raised AttributeError.
        # The intended check is on the `transform` passed to __init__.
        if self.transform is not None:
            img = self.transform(img)
        return img

    def __len__(self):
        return len(self.line)


if __name__ == "__main__":
    # Smoke test: build the training dataset and iterate it once,
    # batch size 1, in graph mode on device 5.
    context.set_context(device_id=5, mode=context.GRAPH_MODE)
    generator = DDFADataset(
        root='./SynergyNet/3dmm_data/train_aug_120x120',
        filelists='./SynergyNet/3dmm_data/train_aug_120x120.list.train',
        param_fp='./SynergyNet/3dmm_data/param_all_norm_v201.pkl',
    )
    loader = ds.GeneratorDataset(generator, ["data", "target"], shuffle=False)
    loader = loader.batch(1)
    for sample in loader.create_dict_iterator():
        img = sample['data']
        target = sample['target']
