import random

import cv2
import numpy as np
import torch
import torch.utils.data as data

from utity import _load


class ToTensor(object):
    """Convert an HWC ``np.ndarray`` image to a CHW float ``torch.Tensor``.

    No scaling is applied — pixel values are kept as-is (cast to float32);
    normalization is expected to be done by a separate transform.
    """
    def __call__(self, pic):
        if not isinstance(pic, np.ndarray):
            # The original silently returned None here, which surfaced as a
            # confusing error later in the pipeline; fail loudly instead.
            raise TypeError(f'ToTensor expects np.ndarray, got {type(pic).__name__}')
        # HWC -> CHW, the layout torch models expect.
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        return img.float()

    def __repr__(self):
        return self.__class__.__name__ + '()'


class Normalize(object):
    """Normalize a tensor in place: ``(tensor - mean) / std``.

    Defaults (mean=127.5, std=128) map uint8 pixel values roughly
    into the [-1, 1] range. The input tensor is mutated and returned.
    """
    def __init__(self, mean=127.5, std=128):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # In-place subtract/divide; returns the same tensor object.
        return tensor.sub_(self.mean).div_(self.std)


class LP300WDataSet(data.Dataset):
    """Preprocessed 300W-LP dataset: cropped face images + parameter targets.

    Args:
        ori_fp: directory prefix for the dataset images (joined to each
            image name by plain string concatenation, so it should end
            with a path separator).
        param_fp: directory holding the pickled name/parameter files.
        is_train: True for the training split, False for validation.
        transform: optional callable applied to each loaded image.
        std_size: expected image side length (default 120). When 224, the
            substring '120' in ori_fp is rewritten to '224' to point at
            the 224-pixel crops.
    """
    def __init__(self, ori_fp, param_fp, is_train, transform=None, std_size=120):
        self.ori_fp = ori_fp
        self.transform = transform
        self.std_size = std_size
        if self.std_size == 224:
            # The 224-px crops live in a sibling directory whose path
            # differs only by '120' -> '224'.
            self.ori_fp = str(ori_fp).replace('120', '224')
        self.is_train = is_train
        if is_train:
            self.names = _load(param_fp + 'train_a.pkl')
            self.params = _load(param_fp + 'param_train_normal_a.pkl')
        else:
            self.names = _load(param_fp + 'val.pkl')
            self.params = _load(param_fp + 'param_val_normal_fixed.pkl')

    def __getitem__(self, item):
        """Return ``(image, target)`` for index ``item``."""
        path = self.ori_fp + self.names[item]
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        if img is None:
            # cv2.imread silently returns None on a bad/unreadable path;
            # fail loudly here instead of with a cryptic error downstream.
            raise FileNotFoundError(f'image not found or unreadable: {path}')
        target = self.params[item]
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.names)


class WLFWLAPADataSet(data.Dataset):
    """Preprocessed WLFW/LaPa dataset: face images + 68-point targets.

    Args:
        ori_fp: directory prefix for the dataset images (joined to each
            image name by plain string concatenation).
        param_fp: directory holding the pickled split files
            ('train_68.pkl' / 'val_68.pkl', each a dict with 'names'
            and 'pts').
        is_train: True for the training split, False for validation.
        transform: optional callable applied to each loaded image.
        std_size: side length images are resized to (default 120).
    """
    def __init__(self, ori_fp, param_fp, is_train, transform=None, std_size=120):
        self.ori_fp = ori_fp
        self.transform = transform
        self.std_size = std_size
        self.is_train = is_train
        split_file = 'train_68.pkl' if is_train else 'val_68.pkl'
        d = _load(param_fp + split_file)
        self.names = d['names']
        self.params = d['pts']
        # Shuffle names and targets together so each name keeps its target.
        # NOTE(review): this also shuffles the validation split and is not
        # seeded, so runs are not reproducible — confirm this is intended.
        if len(self.names) > 0:  # zip(*[]) would raise ValueError when empty
            paired = list(zip(self.names, self.params))
            random.shuffle(paired)
            names, params = zip(*paired)
            self.names = list(names)
            self.params = list(params)

    def __getitem__(self, item):
        """Return ``(image, target)`` for index ``item``."""
        path = self.ori_fp + self.names[item]
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        if img is None:
            # cv2.imread silently returns None on a bad/unreadable path;
            # fail loudly here instead of with a cryptic error downstream.
            raise FileNotFoundError(f'image not found or unreadable: {path}')
        img = cv2.resize(img, dsize=(self.std_size, self.std_size), interpolation=cv2.INTER_LINEAR)
        target = self.params[item]
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.names)
