from easydict import EasyDict as edict
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
import cv2
import os
import torch
import yaml


def Decode_Gaze360(line):
    """Parse one Gaze360 label row: face path at column 0, 2D gaze at column 5;
    the row's meta columns span the first 6 entries (info_length)."""
    return edict(face=line[0], gaze2d=line[5], info_length=6)


def Decode_ETH(line):
    """Parse one ETH-XGaze label row: face path at column 0, 2D gaze at column 1;
    the row's meta columns span the first 7 entries (info_length)."""
    return edict(face=line[0], gaze2d=line[1], info_length=7)


def Decode_MPII(line):
    """Parse one MPIIFaceGaze label row: face path at column 0, 2D gaze at column 7;
    the row's meta columns span the first 12 entries (info_length)."""
    return edict(face=line[0], gaze2d=line[7], info_length=12)


def Decode_Diap(line):
    """Parse one EyeDiap label row: face path at column 0, 2D gaze at column 6;
    the row's meta columns span the first 11 entries (info_length)."""
    return edict(face=line[0], gaze2d=line[6], info_length=11)


def Decode_Dict():
    """Return the mapping from lowercase dataset name to its label-decoding function."""
    return {
        'gaze360': Decode_Gaze360,
        'eth': Decode_ETH,
        'mpii': Decode_MPII,
        'eyediap': Decode_Diap,
    }


def Get_Decode(name):
    """Look up the label-decoding function for dataset *name* (case-insensitive).

    Raises:
        ValueError: if *name* is not one of the supported datasets.
    """
    mapping = Decode_Dict()
    try:
        return mapping[name.lower()]
    except KeyError:
        # include the offending name so the misconfiguration is debuggable
        raise ValueError(f"not a valid decode: {name!r}") from None


class loader(Dataset):
    """Dataset yielding (image dict, target dict) samples from CSV gaze labels.

    The `dataset` config object must provide:
      name       -- dataset name, selects the decode function (see Get_Decode)
      label_path -- one CSV path or a list of CSV paths
      image_path -- root directory the per-row face paths are relative to
      header     -- pandas `header` argument; the string "None" means headerless
      is_eth     -- ETH stores (pitch, yaw) swapped relative to the others
    """

    # Landmark ids whose (x, y) pairs are read from each row, in output order:
    # iris centers (468, 473), left-eye corners (130, 243), right-eye corners
    # (463, 359), left eyelid top/bottom (27, 23), right eyelid top/bottom
    # (257, 253) — per the original per-line comments. Presumably MediaPipe
    # FaceMesh indices; TODO confirm against the label-generation script.
    LANDMARK_IDS = (468, 473, 130, 243, 463, 359, 27, 23, 257, 253)

    def __init__(self, dataset):
        self.source = edict()
        self.source.lines = []
        self.source.img_root = dataset.image_path
        self.source.is_eth = dataset.is_eth
        self.source.decode = Get_Decode(dataset.name)
        header = dataset.header
        if header == "None":  # config files may spell the literal string "None"
            header = None
        if isinstance(dataset.label_path, list):
            for label in dataset.label_path:
                self.source.lines.extend(np.asarray(pd.read_csv(label, header=header)))
        else:
            self.source.lines = np.asarray(pd.read_csv(dataset.label_path, header=header))

    def __len__(self):
        return len(self.source.lines)

    def __getitem__(self, idx):
        # read and decode one source row
        line = self.source.lines[idx]
        anno = self.source.decode(line)

        # gaze: radians -> degrees, shift into [0, 360) then scale to [0, 1]
        label_gaze = np.array(anno.gaze2d.split(",")).astype("float")
        if self.source.is_eth:
            # ETH stores the two angles in the opposite order; swap to match
            label_gaze[0], label_gaze[1] = label_gaze[1], label_gaze[0]
        gaze_yaw_pitch = (np.rad2deg(label_gaze) + 180.0) / 360.0

        # landmarks: x/y pairs stored flattened after the first info_length columns
        base = anno.info_length
        landmarks = np.array(
            [[float(line[base + lid * 2]), float(line[base + lid * 2 + 1])]
             for lid in self.LANDMARK_IDS]
        )
        landmarks = landmarks / 224.0  # normalized by the 224x224 crop size

        # load image; fail loudly instead of the cryptic TypeError that a
        # None return from cv2.imread would trigger on the division below
        image_path = os.path.join(self.source.img_root, anno.face).replace("\\", "/")
        img = cv2.imread(image_path)
        if img is None:
            raise FileNotFoundError(f"cannot read image: {image_path}")
        img = (img / 255.0).transpose(2, 0, 1)  # HWC -> CHW, scaled to [0, 1]

        img = {
            "face": torch.from_numpy(img).type(torch.FloatTensor),
        }
        target = {
            "gaze_yaw_pitch": torch.from_numpy(gaze_yaw_pitch).type(torch.FloatTensor),
            "landmarks": torch.from_numpy(landmarks).type(torch.FloatTensor),
            "img_path": image_path,  # full path of the image on disk
        }
        return img, target


def txtload(source, batch_size, shuffle=False, num_workers=0):
    """Construct a DataLoader over `source` and report how many samples were read."""
    ds = loader(source)
    print(f"-- [Read Data]: Total num: {len(ds)}")
    print(f"-- [Read Data]: Source: {source.label}")
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)


if __name__ == "__main__":
    # Visual sanity check: load samples and draw the landmarks back onto the face crop.
    print(111)
    yaml_path = 'config_diap_csv.yaml'
    # use a context manager so the config file handle is closed, not leaked
    with open(yaml_path) as f:
        config = edict(yaml.load(f, Loader=yaml.FullLoader))
    d = loader(config.data_eth)
    for idx in range(2000):
        data, label = d[idx]  # index the dataset instead of calling __getitem__ directly
        file_name = label['img_path']
        face_img = cv2.imread(file_name)
        landmarks1 = label['landmarks']
        # landmarks are normalized by 224 in the loader; scale back to pixels to draw
        for zuobiao in landmarks1:
            cv2.circle(face_img, (int(zuobiao[0]*224.0), int(zuobiao[1]*224.0)), 1, (0, 255, 255), 1)
        cv2.imshow("test", face_img)
        cv2.waitKey(0)