import torch
from torch.utils.data import Dataset, DataLoader
import random
import numpy as np
import os
from PIL import Image
from tqdm import tqdm

# Coarse-grained label mappings for the 25 fine-grained classes.
# level1 collapses them to a binary grouping; level2 to a 7-way grouping.
# Index i is the fine label, the list entry is the coarse label.
level1 = dict(enumerate(
    [1, 1, 0, 1, 0, 0, 1, 0, 0, 1,
     0, 0, 1, 0, 0, 1, 1, 0, 1, 0,
     0, 0, 1, 0, 1]))

level2 = dict(enumerate(
    [0, 1, 2, 1, 3, 4, 1, 4, 4, 0,
     5, 4, 0, 3, 5, 1, 1, 4, 1, 3,
     3, 3, 6, 3, 1]))


def self_padding(img, size, num):
    """Place `img` in the top-left corner of a constant-filled canvas.

    Args:
        img: array of shape (H, W, C) with H/W no larger than the target
             size — TODO confirm callers never pass oversized images.
        size: target shape tuple, e.g. (max_height, max_width, 3).
        num: fill value for the padded region.

    Returns:
        float64 array of shape `size` containing `img` at [:H, :W, :].
    """
    # np.full replaces the wasteful `np.ones(size) * num`; dtype=float
    # preserves the original float64 result of the multiplication.
    img_new = np.full(size, num, dtype=float)
    input_size = img.shape
    img_new[:input_size[0], :input_size[1], :] = img
    return img_new

def normalize(image):
    """Return `image` standardized to zero mean and unit variance.

    Uses the population variance (mean of squared deviations), matching
    the original formula. A constant image has zero variance; instead of
    returning NaN from a 0/0 division, it is mapped to an all-zero array.
    """
    mean = np.mean(image)
    std = np.sqrt(np.mean(np.square(image - mean)))
    if std == 0:
        # Guard: constant input previously produced NaNs via division by zero.
        return np.zeros_like(image, dtype=float)
    return (image - mean) / std

class CustomDataSet(Dataset):
    """Minimal Dataset over a list of file names.

    Items are the raw file-name strings; all decoding/loading is deferred
    to the collate function.
    """

    def __init__(self, files):
        # Kept as-is; DataLoader indexes into this list.
        self.file_names = files

    def __getitem__(self, item):
        name = self.file_names[item]
        return name

    def __len__(self):
        return len(self.file_names)


class collater():
    """Collate callable for a DataLoader batch of image file names.

    For each name of the form "<id>_<label>.<ext>" it loads the image
    from `path`, zero-pads it to (max_height, max_width, 3), and maps the
    fine label according to `task`:
        task == 2  -> binary label via `level1`
        task == 7  -> 7-way label via `level2`
        task == 25 -> raw 25-class label (default)
    Returns (image_ids, images as NCHW float tensor, labels).
    """

    def __init__(self, path, task=25, max_height=220, max_width=220):
        # task defaults to 25 (identity mapping) for backward compatibility
        # with callers that pass only `path`.
        self.path = path
        self.task = task
        self.max_height = max_height
        self.max_width = max_width

    def __call__(self, batch_data):
        N = len(batch_data)
        labels_new = torch.zeros(N, dtype=torch.long)
        dataset_new = torch.zeros((N, self.max_height, self.max_width, 3))
        image_ids = []
        for num, file in enumerate(batch_data):
            # "<id>_<label>.<ext>"; `img_id` avoids shadowing builtin `id`.
            img_id, label = file.split(".")[0].split("_")
            image_ids.append(img_id)
            label = int(label)
            # np.int was removed in NumPy 1.24 — plain int() is the fix.
            if self.task == 2:
                label = int(level1[label])
            elif self.task == 7:
                label = int(level2[label])
            elif self.task != 25:
                # Fail loudly instead of print+break, which silently
                # returned a zero-filled, partially-built batch.
                raise ValueError("task is error")
            labels_new[num] = label
            im = np.array(Image.open(self.path + file))
            # NOTE(review): normalization deliberately disabled — images
            # are padded raw; confirm this matches the training recipe.
            im_new = self_padding(im, (self.max_height, self.max_width, 3), 0)
            dataset_new[num] = torch.from_numpy(im_new)

        # NHWC -> NCHW for the model input.
        return (image_ids, dataset_new.permute(0, 3, 1, 2), labels_new)

if __name__ == "__main__":
    # Deterministic iteration order / shuffling for reproducibility.
    SEED = 1
    random.seed(SEED)
    torch.manual_seed(SEED)

    load_path = "./Dataset/Train/"
    files = os.listdir(load_path)
    # Files listed here are excluded from the training set.
    with open("./Dataset/gred_img_train.txt", "r") as f:
        gred_list = set(f.read().strip().split('\n'))  # set: O(1) membership
    data_set = CustomDataSet([x for x in files if x not in gred_list])

    # BUG FIX: collater requires a `task` argument; the original call
    # `collater(load_path)` raised TypeError. 25 keeps the raw labels.
    collate_fn = collater(load_path, 25)
    train_dataset = DataLoader(data_set, batch_size=1, shuffle=False,
                               num_workers=2, collate_fn=collate_fn)
    train_pbar = tqdm(enumerate(train_dataset), total=len(train_dataset))
    # Smoke-test pass: iterate every batch to verify loading/padding works.
    for num, data in train_pbar:
        a = data