# Prepare the dataset; unlike the original author, we do not use ImageFolder.
import linecache
import random
import PIL.ImageOps
import PIL
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset

from Siamese.Config import Config


class MyDataset(Dataset):
    """Siamese-pair dataset built from a 'path label' listing file.

    Each line of the listing file is "<image_path> <label>". Consecutive
    lines sharing a label form one class group; every 2-combination inside
    a group is stored as a positive pair, and (unless ``onlysame``) each
    positive pair is accompanied by one pair drawn against a random line,
    which is most likely a negative pair.
    """

    def __init__(self, txt, transform=None, target_transform=None, should_invert=False,
                 onlysame=False, gray=False):
        """Parse the listing file `txt` and precompute all image pairs.

        Args:
            txt: path to a text file of "<image_path> <label>" lines,
                with same-label lines listed consecutively.
            transform: optional transform applied to both images.
            target_transform: kept for API compatibility (currently unused).
            should_invert: if True, color-invert both images.
            onlysame: if True, generate only same-label (positive) pairs.
            gray: if True, convert both images to grayscale ("L" mode).
        """
        self.transform = transform
        self.target_transform = target_transform
        self.should_invert = should_invert
        self.onlysame = onlysame
        self.gray = gray
        self.pairs = []
        # BUGFIX: use a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open(txt, 'r') as f:
            raw = f.readlines()
        # TODO: train on only the subjects listed in Config.typelist for now.
        # BUGFIX: the original `line.strip('\n')` discarded its result (no-op);
        # strip here so pair strings can be joined with an explicit space.
        # Filtering up front also keeps excluded labels out of the negative
        # sampling pool, which the original did not guarantee.
        lines = [line.strip() for line in raw
                 if line.strip() and int(line.split()[1]) in Config.typelist]
        group = []
        lastlabel = lines[0].split()[1] if lines else None
        for line in lines:
            label = line.split()[1]
            if label != lastlabel:
                # Label changed: emit all pairs for the finished group.
                self._add_pairs(group, lines)
                group = []
                lastlabel = label
            group.append(line)
        # BUGFIX: the original silently dropped the final label group (its
        # handling was commented out, and the commented version could also
        # index out of range via randint(1, len(lines))).
        self._add_pairs(group, lines)
        print(len(self.pairs))

    def _add_pairs(self, group, pool):
        """Append pairs for one same-label `group` to self.pairs.

        Every 2-combination of `group` becomes a positive pair; unless
        `self.onlysame`, each positive pair is matched with one pair built
        from a random line of `pool` (very likely a different label, i.e.
        a negative pair — same-label draws are tolerated, as in the
        original code).
        """
        for i in range(len(group) - 1):
            for j in range(i + 1, len(group)):
                # Stored as "path_a label_a path_b label_b".
                self.pairs.append(group[i] + ' ' + group[j])
                if not self.onlysame:
                    # One random draw: most likely a different label.
                    rand_line = pool[random.randint(0, len(pool) - 1)]
                    self.pairs.append(rand_line + ' ' + group[i])

    def __getitem__(self, index):
        """Return (img0, img1, target): target is 1.0 when the two images
        have different labels and 0.0 when the labels match."""
        pair = self.pairs[index].split()
        img0 = Image.open(pair[0])
        img1 = Image.open(pair[2])
        if self.gray:
            img0 = img0.convert("L")
            img1 = img1.convert("L")

        if self.should_invert:
            img0 = PIL.ImageOps.invert(img0)
            img1 = PIL.ImageOps.invert(img1)

        if self.transform is not None:
            img0 = self.transform(img0)
            img1 = self.transform(img1)

        # 1.0 = different labels (dissimilar pair), 0.0 = same label.
        return img0, img1, torch.from_numpy(np.array([int(pair[1] != pair[3])], dtype=np.float32))

    def __len__(self):
        """Number of precomputed pairs."""
        return len(self.pairs)
