from torch.utils.data import Dataset, DataLoader
import numpy as np
import cv2
import os
import torch
import yaml
import csv


def Decode_Gaze360(line):
    """Decode one Gaze360 label row.

    Returns a dict with the face-image path (column 0), the raw 2D gaze
    string (column 5), and the number of metadata columns (6) that
    precede the flattened landmark values.
    """
    return {
        'face': line[0],
        'gaze2d': line[5],
        'info_length': 6,
    }


def Decode_ETH(line):
    """Decode one ETH-XGaze label row.

    Returns a dict with the face-image path (column 0), the raw 2D gaze
    string (column 1), and the number of metadata columns (7) that
    precede the flattened landmark values.
    """
    return {
        'face': line[0],
        'gaze2d': line[1],
        'info_length': 7,
    }


def Decode_MPII(line):
    """Decode one MPIIFaceGaze label row.

    Returns a dict with the face-image path (column 0), the raw 2D gaze
    string (column 7), and the number of metadata columns (12) that
    precede the flattened landmark values.
    """
    return {
        'face': line[0],
        'gaze2d': line[7],
        'info_length': 12,
    }


def Decode_Diap(line):
    """Decode one EyeDiap label row.

    Returns a dict with the face-image path (column 0), the raw 2D gaze
    string (column 6), and the number of metadata columns (11) that
    precede the flattened landmark values.
    """
    return {
        'face': line[0],
        'gaze2d': line[6],
        'info_length': 11,
    }


def Decode_Dict():
    """Return the mapping from lowercase dataset name to its row decoder."""
    return {
        'gaze360': Decode_Gaze360,
        'eth': Decode_ETH,
        'mpii': Decode_MPII,
        'eyediap': Decode_Diap,
    }


def Get_Decode(name):
    """Look up the row-decoder function for *name* (case-insensitive).

    Raises ValueError when the dataset name is not registered.
    """
    decoders = Decode_Dict()
    name = name.lower()
    if name not in decoders:
        raise ValueError(f"Not a valid decode: {name}")
    return decoders[name]


class loader(Dataset):
    """Dataset of (face image, gaze/landmark target) pairs read from label CSVs.

    ``dataset`` is a config dict with keys:
        name            -- dataset identifier used to pick a row decoder
        label_path      -- one CSV path, or a list of CSV paths
        image_path      -- root directory containing the face images
        is_eth          -- if truthy, reverse the two gaze components
        header          -- the string "None" for headerless CSVs; any other
                           value causes the first CSV row to be skipped
        dataset_dirname -- optional directory name inserted into each
                           relative image path (defaults to "")
    """

    def __init__(self, dataset):
        self.source = {}
        self.source['data_records'] = []
        self.source['img_root'] = dataset['image_path']
        self.source['is_eth'] = dataset['is_eth']
        self.source['decode'] = Get_Decode(dataset['name'])
        # The YAML config encodes "no header row" as the literal string "None".
        self.source['header'] = dataset['header'] if dataset['header'] != "None" else None
        # NOTE(review): these look like MediaPipe face-mesh landmark indices
        # (irises / eye corners / lids) -- confirm against the label generator.
        self.source['landmark_indices'] = [468, 473, 130, 243, 463, 359, 27, 23, 257, 253]
        self.source['dataset_dirname'] = dataset.get('dataset_dirname', "")

        if isinstance(dataset['label_path'], list):
            for label in dataset['label_path']:
                self.process_csv(label)
        else:
            self.process_csv(dataset['label_path'])

    def process_csv(self, label_path):
        """Parse one label CSV and append its rows to ``data_records``."""
        with open(label_path, 'r', newline='') as f:
            reader = csv.reader(f)
            if self.source['header'] is not None:
                next(reader)  # skip the header row
            for row in reader:
                anno = self.source['decode'](row)
                gaze2d = np.array(anno['gaze2d'].split(',')).astype("float")
                # Landmarks are stored flattened after the metadata columns:
                # column info_length + 2*idx is x, the following column is y.
                landmarks = np.empty((len(self.source['landmark_indices']), 2), dtype=float)
                for i, idx in enumerate(self.source['landmark_indices']):
                    landmarks[i, 0] = float(row[anno['info_length'] + idx * 2])
                    landmarks[i, 1] = float(row[anno['info_length'] + idx * 2 + 1])
                self.source['data_records'].append({
                    'face': anno['face'],
                    'gaze2d': gaze2d,
                    'landmarks': landmarks
                })

    def __len__(self):
        """Number of parsed label rows."""
        return len(self.source['data_records'])

    def __getitem__(self, idx):
        """Return (image dict, target dict) for record ``idx``.

        The gaze label is converted from radians to degrees, shifted by
        180 and normalized to [0, 1]; landmarks are normalized by the
        224-pixel face-crop size.
        """
        record = self.source['data_records'][idx]
        label_gaze = record['gaze2d']
        if self.source['is_eth']:
            # Reverse the (a, b) component order for ETH-style labels.
            # NOTE(review): assumed to align component order across datasets
            # -- confirm with the label format.
            label_gaze = label_gaze[::-1]
        label_gaze = np.rad2deg(label_gaze) + 180.0
        gaze_yaw_pitch = label_gaze / 360.0
        landmarks = record['landmarks'] / 224.0

        # Re-root the relative image path, inserting the optional dataset
        # directory after the first component. Works for any path depth
        # (previously only 2- or 3-component paths were supported).
        parts = record['face'].replace("\\", "/").split("/")
        new_path = '/'.join([parts[0], self.source['dataset_dirname']] + parts[1:])
        image_path = os.path.join(self.source['img_root'], new_path).replace("\\", "/")

        img = cv2.imread(image_path)
        if img is None:
            # cv2.imread silently returns None on a missing/unreadable file;
            # fail loudly instead of raising a cryptic TypeError below.
            raise FileNotFoundError(f"Could not read image: {image_path}")
        img = img / 255.0
        img = img.transpose(2, 0, 1)  # HWC -> CHW for torch

        img = {
            "face": torch.from_numpy(img).type(torch.FloatTensor),
        }
        target = {
            "gaze_yaw_pitch": torch.from_numpy(gaze_yaw_pitch).type(torch.FloatTensor),
            "landmarks": torch.from_numpy(landmarks).type(torch.FloatTensor),
            "img_path": image_path,
        }
        return img, target


def txtload(source, batch_size, shuffle=False, num_workers=0):
    """Build a DataLoader over the gaze dataset described by *source*.

    Prints the record count and the source label before returning the
    configured DataLoader.
    """
    data = loader(source)
    print(f"-- [Read Data]: Total num: {len(data)}")
    print(f"-- [Read Data]: Source: {source['label']}")
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)


if __name__ == "__main__":
    # Debug entry point: load the ETH config and visually verify that the
    # normalized landmarks map back onto each face crop.
    print(111)
    yaml_path = './config_csv.yaml'
    with open(yaml_path, 'r', encoding='utf-8') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    d = loader(config["data_eth"])
    # Clamp to the dataset size -- the previous hard-coded range(2000)
    # raised IndexError on smaller datasets.
    for i in range(min(2000, len(d))):
        data, label = d[i]
        file_name = label['img_path']
        print(label['gaze_yaw_pitch'])
        face_img = cv2.imread(file_name)
        landmarks1 = label['landmarks']
        # Renamed loop variable: it previously shadowed the outer index `i`.
        for point in landmarks1:
            cv2.circle(face_img, (int(point[0] * 224.0), int(point[1] * 224.0)), 1, (0, 255, 255), 1)
        cv2.imshow("test", face_img)
        cv2.waitKey(0)
