import re
import sys
from collections import Counter
from os import listdir
from os.path import exists, isdir, isfile, join

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset, Sampler


def get_weight_list(dataset):
    """Return one sampling weight per sample, inversely proportional to its class size.

    Useful with ``torch.utils.data.WeightedRandomSampler`` to balance classes:
    each sample of a class with ``c`` members gets weight ``1 / c``.

    Args:
        dataset: sequence of ``(data, label)`` pairs; ``label`` must be
            convertible with ``int()``.

    Returns:
        list[float]: weights aligned with the order of ``dataset``.
    """
    labels = [int(data[1]) for data in dataset]
    # Counter replaces the manual label -> index-list bookkeeping; we only
    # ever needed the per-class counts, not the index lists themselves.
    counts = Counter(labels)
    return [1 / counts[label] for label in labels]


class AortaDataset(Dataset):
    """2D image classification dataset laid out as ``<img_dir>/<label>/<image>``.

    Only label sub-directories whose name appears in ``cate`` (after ``str``
    conversion) are kept; class indices follow the sorted label order.
    """

    def __init__(self, img_dir, cate, transform) -> None:
        self.img_dir = img_dir
        self.transform = transform
        wanted = {str(c) for c in cate}
        # sorted sub-directories of img_dir that are requested categories
        self.labels = sorted(
            entry for entry in listdir(img_dir)
            if isdir(join(img_dir, entry)) and entry in wanted
        )
        self.datas = []
        for class_idx, label in enumerate(self.labels):
            label_dir = join(img_dir, label)
            # visible (non-dot) regular files, sorted for a stable order
            names = sorted(
                name for name in listdir(label_dir)
                if not name.startswith('.') and isfile(join(label_dir, name))
            )
            self.datas.extend([join(label_dir, name), class_idx] for name in names)

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        path, target = self.datas[index]
        return self.transform(Image.open(path)), target


class AortaDataset3D(Dataset):
    """3D dataset: groups `depth` consecutive slices of one series into a stack.

    Image file names must split into exactly 4 fields on '_' and '.' —
    presumably ``<case>_<series>_<sliceindex>.<ext>`` (TODO confirm against the
    data layout). Only groups whose slice indices advance by exactly `step`
    within the same case/series are kept.
    """

    def __init__(self, img_dir, transform, depth, step=1, residual=False):
        """
        Args:
            img_dir: root directory containing one sub-directory per class label.
            transform: callable applied to the whole *list* of PIL images at once
                (assumed to return a list of tensors — TODO confirm contract).
            depth: number of slices per stack.
            step: required slice-index distance between consecutive slices.
            residual: also append normalised neighbour differences to the stack.
        """
        self.img_dir = img_dir
        self.transform = transform
        self.depth = depth
        self.step = step
        self.residual = residual
        # every sub-directory of img_dir is treated as a class label
        self.labels = sorted([label for label in listdir(img_dir) if isdir(join(img_dir, label))])
        self.datas = []
        for i, label in enumerate(self.labels):
            # visible (non-dot) regular files, sorted so slice order is stable
            img_list = sorted(list(filter(lambda x: not x.startswith('.') and isfile(join(img_dir, label, x)), listdir(join(img_dir, label)))))
            il_len = len(img_list)
            for j in range(il_len - (depth-1)*step):
                nl = re.split('[_.]', img_list[j])
                assert len(nl) == 4, f'Format of image file name "{img_list[j]}" is wrong.'
                # nld = re.split('[_.]', img_list[j + (depth-1)*step])
                # assert len(nl) == 4 and len(nld) == 4, 'Format of image file name is wrong.'
                # if nl[0] != nld[0] or nl[1] != nld[1] or int(nl[2])+(depth-1)*step != int(nld[2]):
                #     continue
                s = 0
                group_list = []
                # Scan forward from j collecting files whose first two name
                # fields match the anchor and whose slice index equals
                # anchor + s*step; non-matching indices are skipped (not fatal),
                # a case/series change aborts the group.
                for k in range(j, j + (depth-1)*step+1):
                    nlk = re.split('[_.]', img_list[k])
                    assert len(nlk) == 4, f'Format of image file name "{img_list[k]}" is wrong.'
                    if nl[0] != nlk[0] or nl[1] != nlk[1]:
                        break
                    if int(nl[2]) + s*step == int(nlk[2]):
                        group_list.append(img_list[k])
                        s += 1
                        if s == depth:
                            break
                # keep the group only if a full stack of `depth` slices was found
                if s == depth:
                    self.datas.append([[join(img_dir, label, img) for img in group_list], i])
                # self.datas.append([[join(img_dir, label, img_list[k]) for k in range(j, j + (depth-1)*step+1)], i])

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        # Returns (imgs, label) where imgs stacks the slices along dim=1
        # (depth dimension after the channel dim — presumably C x D x H x W,
        # TODO confirm against the model input).
        label = torch.tensor(self.datas[index][1], dtype=torch.long)
        img_path_list = self.datas[index][0]
        img_list = []
        # for img_path in img_path_list:
        #     img = Image.open(img_path)
        #     img = self.transform(img)
        #     if self.residual and img_list:
        #         res = img - img_list[-1]
        #         res = (res + 1) / 2
        #         img_list.append(res)
        #     img_list.append(img)
        for img_path in img_path_list:
            img_list.append(Image.open(img_path))
        # transform is applied to the whole list so every slice receives the
        # same (random) augmentation parameters
        img_list = self.transform(img_list)
        if self.residual:
            # append (x_i - x_{i-1} + 1) / 2 for each neighbouring pair of the
            # original stack; len() is evaluated once by range(), so the
            # appended residuals are never re-diffed
            for i in range(1, len(img_list)):
                res = img_list[i] - img_list[i-1]
                res = (res + 1) / 2
                img_list.append(res)
        imgs = torch.stack(img_list, dim=1)
        return imgs, label

class AortaTest(Dataset):
    """Unlabelled test dataset: every visible file directly inside ``img_dir``."""

    def __init__(self, img_dir, transform):
        self.img_dir = img_dir
        self.transform = transform
        # visible (non-dot) regular files, sorted for a deterministic order
        self.datas = sorted(
            name for name in listdir(img_dir)
            if not name.startswith('.') and isfile(join(img_dir, name))
        )

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        path = join(self.img_dir, self.datas[index])
        return self.transform(Image.open(path))

class AortaTest3D(Dataset):
    """Unlabelled 3D test dataset: stacks of `depth` consecutive slices.

    File names must split into exactly 3 fields on '_' and '.' — presumably
    ``<crop>_<sliceindex>.<ext>`` (TODO confirm). All files in ``img_dir``
    must belong to the same crop; a differing first field is an assertion
    failure rather than a group boundary.
    """

    def __init__(self, img_dir, transform, depth, step=1, residual=False):
        """
        Args:
            img_dir: flat directory of slice images (no label sub-directories).
            transform: callable applied to each PIL image *individually*.
            depth: number of slices per stack.
            step: required slice-index distance between consecutive slices.
            residual: if True, interleave neighbour differences into the stack.
        """
        self.img_dir = img_dir
        self.transform = transform
        self.depth = depth
        self.step = step
        self.residual = residual
        # visible (non-dot) regular files, sorted for stable slice order
        self.files = sorted(list(filter(lambda x: not x.startswith('.') and isfile(join(img_dir, x)), listdir(img_dir))))
        self.datas = []
        il_len = len(self.files)
        for j in range(il_len - (depth-1)*step):
            nl = re.split('[_.]', self.files[j])
            assert len(nl) == 3, 'Format of image file name is wrong.'
            s = 0
            group_list = []
            # Scan forward collecting files whose slice index equals
            # anchor + s*step; non-matching indices are skipped, not fatal.
            for k in range(j, j + (depth-1)*step+1):
                nlk = re.split('[_.]', self.files[k])
                assert len(nlk) == 3, 'Format of image file name is wrong.'
                assert nl[0] == nlk[0], 'Name of crops are different!'
                if int(nl[1]) + s*step == int(nlk[1]):
                    group_list.append(self.files[k])
                    s += 1
                    if s == depth:
                        break
            # keep only complete stacks of `depth` slices
            if s == depth:
                self.datas.append([join(img_dir, img) for img in group_list])

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        # Returns a tensor with the slices stacked along dim=1.
        # NOTE(review): unlike AortaDataset3D, the transform is applied per
        # image here, and each residual is inserted *before* its source image
        # (order: img0, res1, img1, res2, img2, ...) without the (res + 1) / 2
        # renormalisation (commented out below) — confirm this asymmetry with
        # the training dataset is intended.
        img_path_list = self.datas[index]
        img_list = []
        for img_path in img_path_list:
            img = Image.open(img_path)
            img = self.transform(img)
            if self.residual and img_list:
                res = img - img_list[-1]
                #res = (res + 1) / 2
                img_list.append(res)
            img_list.append(img)
        imgs = torch.stack(img_list, dim=1)
        return imgs


class LabelSampler(Sampler[int]):
    """Class-balanced sampler that undersamples every class to the rarest one.

    Each epoch yields ``len(rarest class)`` indices per class: the rarest
    class contributes all of its indices, every other class a fresh random
    subset drawn without replacement.
    """

    def __init__(self, data_source, shuffle=True):
        self.data_source = data_source
        self.shuffle = shuffle
        self.labels = [data_source[i][1] for i in range(len(data_source))]
        self.unique_labels = np.unique(self.labels)
        # label -> list of dataset indices carrying that label
        self.labels_index_dict = {lab: [] for lab in self.unique_labels}
        for idx, lab in enumerate(self.labels):
            self.labels_index_dict[lab].append(idx)
        counts = {lab: len(idxs) for lab, idxs in self.labels_index_dict.items()}
        assert counts
        # rarest class determines the per-class sample budget
        self.shortest_label = min(counts, key=counts.get)
        self.shortest_label_len = counts[self.shortest_label]

    def __iter__(self):
        picked = []
        for lab, idxs in self.labels_index_dict.items():
            if lab == self.shortest_label:
                picked.extend(idxs)
            else:
                picked.extend(np.random.choice(idxs, self.shortest_label_len, False).tolist())
        if self.shuffle:
            np.random.shuffle(picked)
        return iter(picked)

    def __len__(self):
        return self.shortest_label_len * len(self.unique_labels)


class AortaDataset3DCenter(Dataset):
    """3D dataset that builds a `depth`-sized stack *centred* on every slice.

    Image file names must split into 4 fields on '_' and '.' — presumably
    ``<case>_<series>_<sliceindex>.<ext>`` (TODO confirm); names with 5 fields
    are skipped. For each kept slice, the ``depth // 2`` slices before and
    after it (spaced by ``step``) are gathered; missing neighbours at volume
    boundaries are padded by repeating the edge slice, so every stack has
    exactly ``depth`` entries with the anchor slice in the middle.
    """

    def __init__(self, img_dir, cate, transform, depth, step=1, residual=False, supcon=False, mask_dir=None):
        """
        Args:
            img_dir: root directory with one sub-directory per class label.
            cate: iterable of labels to keep (converted to ``str``).
            transform: callable applied to a *list* of PIL images at once.
            depth: slices per stack; must be odd so the stack is centred.
            step: slice-index distance between consecutive stack entries.
            residual: also append normalised frame differences to the stack.
            supcon: return two independently-augmented views of each stack
                (supervised-contrastive style).
            mask_dir: optional flat directory of per-image masks; an all-white
                mask is substituted for any missing file.
        """
        self.img_dir = img_dir
        self.transform = transform
        assert depth % 2 == 1, 'depth should be odd number.'
        self.depth = depth
        self.step = step
        self.residual = residual
        self.supcon = supcon
        self.mask_dir = mask_dir
        # residual mode is only supported together with a mask directory
        assert not (mask_dir is None and residual == True)
        cate = [str(i) for i in cate]
        self.labels = sorted([label for label in listdir(img_dir) if isdir(join(img_dir, label)) and label in cate])
        self.datas = []
        for i, label in enumerate(self.labels):
            # visible (non-dot) regular files, sorted so slice order is stable
            img_list = sorted(list(filter(lambda x: not x.startswith('.') and isfile(join(img_dir, label, x)), listdir(join(img_dir, label)))))
            il_len = len(img_list)
            for j in range(il_len):
                nl = re.split('[_.]', img_list[j])
                assert len(nl) == 4 or len(nl) == 5, f'Format of image file name "{img_list[j]}" is wrong.'
                if len(nl) == 5:
                    # 5-field names are auxiliary files, not anchor slices
                    continue
                group_list = [img_list[j]]
                half = s = depth // 2
                # --- walk backwards, collecting the `half` preceding slices ---
                for k in range(j-1, j-step*half-1, -1):
                    if k < 0:
                        # ran off the front of the listing: pad with first slice
                        for _ in range(s):
                            group_list.insert(0, group_list[0])
                        break
                    nlk = re.split('[_.]', img_list[k])
                    if nl[0] != nlk[0] or nl[1] != nlk[1]:
                        # previous volume reached: pad with first slice
                        for _ in range(s):
                            group_list.insert(0, group_list[0])
                        break
                    offset = int(nl[2]) - int(nlk[2])
                    if offset % step == 0:
                        # a slice can stand in for gaps of whole steps
                        for _ in range(offset//step-(half-s)):
                            group_list.insert(0, img_list[k])
                            s -= 1
                            if s == 0:
                                break
                    if s == 0:
                        break
                s = depth // 2
                # --- walk forwards, collecting the `half` following slices ---
                for k in range(j+1, j+step*half+1):
                    if k >= il_len:
                        # BUG FIX: the end-of-listing case previously did
                        # group_list.insert(0, group_list[0]) (pad at the FRONT
                        # with the first slice), which de-centred the stack for
                        # the last slices of a volume; pad at the end with the
                        # last slice, matching the series-mismatch case below.
                        for _ in range(s):
                            group_list.append(group_list[-1])
                        break
                    nlk = re.split('[_.]', img_list[k])
                    if nl[0] != nlk[0] or nl[1] != nlk[1]:
                        # next volume reached: pad with last slice
                        for _ in range(s):
                            group_list.append(group_list[-1])
                        break
                    offset = int(nlk[2]) - int(nl[2])
                    if offset % step == 0:
                        for _ in range(offset//step-(half-s)):
                            group_list.append(img_list[k])
                            s -= 1
                            if s == 0:
                                break
                    if s == 0:
                        break
                assert len(group_list) == depth, f'depth wrong: {img_list[j]}'
                if mask_dir is None:
                    self.datas.append([[join(img_dir, label, img) for img in group_list], i])
                else:
                    # mask paths are looked up by bare file name; None marks a
                    # missing mask (replaced by an all-white mask at load time)
                    group_mask_list = [join(mask_dir, img) if exists(join(mask_dir, img)) else None for img in group_list]
                    self.datas.append([[join(img_dir, label, img) for img in group_list], i, group_mask_list])

    def __len__(self):
        return len(self.datas)

    def _append_residuals(self, tensors):
        """Append (x_i - x_{i-1} + 1) / 2 for each neighbouring pair, in place.

        range() evaluates len() once, so appended residuals are not re-diffed.
        """
        for i in range(1, len(tensors)):
            res = tensors[i] - tensors[i-1]
            tensors.append((res + 1) / 2)

    def __getitem__(self, index):
        label = torch.tensor(self.datas[index][1], dtype=torch.long)
        img_list = [Image.open(p) for p in self.datas[index][0]]
        if self.supcon:
            # two independent augmentations of the same stack
            views = []
            for _ in range(2):
                t = self.transform(img_list)
                if self.residual:
                    self._append_residuals(t)
                views.append(torch.stack(t, dim=1))
            return views, label
        elif self.mask_dir is None:
            t = self.transform(img_list)
            if self.residual:
                self._append_residuals(t)
            return torch.stack(t, dim=1), label
        else:
            mask_list = []
            for img, mask_path in zip(img_list, self.datas[index][2]):
                if mask_path is None:
                    # missing mask -> fully-white mask (keeps the whole image)
                    mask = Image.fromarray(np.ones((img.height, img.width), dtype=np.uint8)*255)
                else:
                    mask = Image.open(mask_path)
                mask_list.append(mask)
            # transform images and masks together so they receive identical
            # (random) augmentation parameters
            cat_list = self.transform(img_list + mask_list)
            imgs = torch.stack(cat_list[:self.depth], dim=1)
            masks = torch.stack(cat_list[self.depth:], dim=1)
            return imgs, label, masks


class MultiChannel(Dataset):
    """Merges one image from each of three channel directories into an RGB image.

    ``img_dir`` provides the directory/label structure and the first channel;
    ``c2_dir`` and ``c3_dir`` must mirror it. Samples missing a second or
    third channel file are silently dropped.
    """

    def __init__(self, img_dir, c2_dir, c3_dir, transform):
        self.img_dir = img_dir
        self.transform = transform
        self.labels = sorted(d for d in listdir(img_dir) if isdir(join(img_dir, d)))
        self.datas = []
        not_exist_list = []
        for class_idx, label in enumerate(self.labels):
            label_dir = join(img_dir, label)
            names = sorted(
                name for name in listdir(label_dir)
                if not name.startswith('.') and isfile(join(label_dir, name))
            )
            for name in names:
                channels = [join(root, label, name) for root in (img_dir, c2_dir, c3_dir)]
                # record (but otherwise ignore) channel files that are missing
                missing = [p for p in channels[1:] if not exists(p)]
                if missing:
                    not_exist_list.extend(missing)
                    continue
                self.datas.append([channels, class_idx])

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        channel_paths, class_idx = self.datas[index]
        label = torch.tensor(class_idx, dtype=torch.long)
        rgb = Image.merge('RGB', [Image.open(p) for p in channel_paths])
        return self.transform(rgb), label


class MaskDataset(Dataset):
    """Image classification dataset with an optional per-image mask.

    Masks live in a flat ``mask_dir`` keyed by bare file name; if no mask
    exists for an image, an all-white mask (keep everything) is substituted
    when the item is loaded.
    """

    def __init__(self, img_dir, mask_dir, cate, transform):
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.transform = transform
        wanted = {str(c) for c in cate}
        self.labels = sorted(
            entry for entry in listdir(img_dir)
            if isdir(join(img_dir, entry)) and entry in wanted
        )
        self.datas = []
        for class_idx, label in enumerate(self.labels):
            label_dir = join(img_dir, label)
            names = sorted(
                name for name in listdir(label_dir)
                if not name.startswith('.') and isfile(join(label_dir, name))
            )
            for name in names:
                mask_path = join(mask_dir, name)
                self.datas.append([
                    join(label_dir, name),
                    mask_path if exists(mask_path) else None,
                    class_idx,
                ])

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        img_path, mask_path, class_idx = self.datas[index]
        label = torch.tensor(class_idx, dtype=torch.long)
        img = Image.open(img_path)
        if mask_path is None:
            # no mask on disk -> all-white mask the same size as the image
            mask = Image.fromarray(np.ones((img.height, img.width), dtype=np.uint8) * 255)
        else:
            mask = Image.open(mask_path)
        # image and mask are transformed together so augments stay aligned
        img, mask = self.transform([img, mask])
        return img, label, mask