import os
import random

import pandas as pd
from PIL import Image
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader

# Directory holding the .hdf5 index files for each dataset variant.
labels_dir = os.path.join('.', 'labels')
# Raw sketch image directory (referenced by the lightly example at the bottom).
sk_dir = os.path.join('.', 'data', 'sketch')
# Select the dataset variant by (un)commenting one pair of index paths below.
# sk_load = os.path.join(labels_dir, 'PART-SHREC14-EDGE', 'sk_edge.hdf5')
# cad_load = os.path.join(labels_dir, 'PART-SHREC14-EDGE', 'cad_edge.hdf5')
sk_load = os.path.join(labels_dir, 'SHREC14-EDGE', 'sk_edge.hdf5')
cad_load = os.path.join(labels_dir, 'SHREC14-EDGE', 'cad_edge.hdf5')
# sk_load = os.path.join(labels_dir, 'SHREC13-EDGE', 'sk_edge.hdf5')
# cad_load = os.path.join(labels_dir, 'SHREC13-EDGE', 'cad_edge.hdf5')
# Saved feature files (presumably ResNet-50 extractions) for FeatureDataset.
sketch_feature = os.path.join('.', 'features', 'sketch_features_resnet50.pth')
shape_feature = os.path.join('.', 'features', 'shape_features_resnet50.pth')
feature_path = [sketch_feature, shape_feature]


def default_sk_loader(path: str):
    """Open a single sketch image from disk.

    Args:
        path: Sketch file path.

    Returns:
        The sketch image (224 x 224).
    """
    sketch = Image.open(path)
    return sketch


def default_cad_loader(views_path_arr: list, transform: transforms):
    """Load the rendered views of one 3D model as a stacked tensor.

    Args:
        views_path_arr: Path list of the 3D model's view images.
        transform: Optional transformation applied to each view.

    Returns:
        Stacked view tensors (12 views) and the model id parsed from
        the first file name.
    """
    loaded = []
    for view_path in views_path_arr:
        # views are converted to single-channel ('L') grayscale
        view = Image.open(view_path).convert('L')
        if transform is not None:
            view = transform(view)
        loaded.append(view)
    # model id is the second-to-last '_'-separated token of the file name
    first_name = os.path.basename(views_path_arr[0])
    model_id = first_name.split('_')[-2]
    return torch.stack(loaded), model_id


class SketchDataset(Dataset):
    '''Generate sketch dataset

    Attributes:
        path:
            Path of the sketches.
        split:
            Partition of data sets.
        transform:
            Transformation applied to the image.
        loader:
            Loader of each sketch.
        classes:
            Number of categories found in the index file.

    '''
    def __init__(self,
                 path: str = sk_load,
                 split: str = 'train',
                 transform: transforms = None,
                 loader=default_sk_loader):
        # load the .hdf5 file to get data path; the frame's index column
        # holds sketch file paths and 'cat'/'split' hold metadata
        sk_pd = pd.read_hdf(path, 'sk')
        # map category names to contiguous integer labels
        labels = sorted(set(sk_pd['cat']))
        label_map = {cat: i for i, cat in enumerate(labels)}
        # number of classes; used by get_contrast_sample so negative
        # sampling adapts to the chosen dataset variant
        self.classes = len(labels)
        self.imgs = []
        self.mapping = {i: [] for i in range(self.classes)}
        for index, row in sk_pd.iterrows():
            if row['split'] == split:
                _label = label_map[row['cat']]
                self.imgs.append((index, _label))
                self.mapping[_label].append(index)
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        """Return (image, label, sketch id) for one sketch."""
        fn, label = self.imgs[index]
        # the file stem (name without extension) identifies the sketch
        sketch_id = os.path.basename(fn).split('.')[0]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label, sketch_id

    def __len__(self):
        return len(self.imgs)

    def get_contrast_sample(self, anchors_label: torch.tensor):
        """Sample one positive and one negative sketch per anchor label.

        Args:
            anchors_label: 1-D tensor of integer class labels.

        Returns:
            Tuple (pos_samples, neg_samples), each stacked along dim 0.
        """
        pos_samples, neg_samples = [], []
        # bug fix: the negative-label range was hard-coded to 48 classes
        # (randint(0, 47)), which breaks for variants such as SHREC13;
        # getattr keeps old pickled/bare instances working
        num_classes = getattr(self, 'classes', 48)
        for a in anchors_label:
            # positive sample: another sketch of the anchor's class
            p_label = a.item()
            sample = self.loader(random.choice(self.mapping[p_label]))
            if self.transform is not None:
                sample = self.transform(sample)
            pos_samples.append(sample)
            # negative sample: a sketch of any other class
            n_label = random.randint(0, num_classes - 1)
            while n_label == p_label:
                n_label = random.randint(0, num_classes - 1)
            sample = self.loader(random.choice(self.mapping[n_label]))
            if self.transform is not None:
                sample = self.transform(sample)
            neg_samples.append(sample)
        return torch.stack(pos_samples, 0), torch.stack(neg_samples, 0)


class ModelViewDataset(Dataset):
    '''Generate 3D model views(edge maps) dataset

    Edge maps are grouped in groups of 12.

    Args:
        path:
            Path of the edge maps.
        split:
            Partition of data sets.
        transform:
            Transformation applied to the image.
        loader:
            Loader of each group of edge maps.

    '''
    def __init__(self,
                 path: str = cad_load,
                 split: str = 'train',
                 transform: transforms = None,
                 loader=default_cad_loader):
        # load the .hdf5 file to get data path
        cad_pd = pd.read_hdf(path, 'cad')
        # map category names to contiguous integer labels
        labels = sorted(set(cad_pd['cat']))
        # number of classes; used by get_contrast_sample so negative
        # sampling adapts to the chosen dataset variant
        self.classes = len(labels)
        label_map = {cat: i for i, cat in enumerate(labels)}
        # views: ([12-views path], label)
        self.views = []
        self.mapping = {i: [] for i in range(self.classes)}
        tmp_views = []
        for index, row in cad_pd.iterrows():
            if row['split'] == split:
                tmp_views.append(index)
                # every 12 consecutive rows form one model's view group
                if len(tmp_views) == 12:
                    _label = label_map[row['cat']]
                    self.views.append((tmp_views, _label))
                    self.mapping[_label].append(tmp_views)
                    tmp_views = []
        # replicate under-represented classes (fewer than 50 groups) —
        # presumably to give random.choice a more balanced pool
        for label in self.mapping:
            samples_num = len(self.mapping[label])
            if samples_num and samples_num < 50:
                padding_times = 50 // samples_num + 1
                self.mapping[label] = self.mapping[label] * padding_times
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        """Return (stacked 12-view tensor, label, model id)."""
        views_arr, label = self.views[index]
        views, model_id = self.loader(views_arr, self.transform)
        return views, label, model_id

    def __len__(self):
        return len(self.views)

    def get_contrast_sample(self, anchors_label: torch.tensor):
        """Sample one positive and one negative view group per anchor.

        Args:
            anchors_label: 1-D tensor of integer class labels.

        Returns:
            Tuple (pos_samples, neg_samples), each stacked along dim 0.
        """
        pos_samples, neg_samples = [], []
        # bug fix: the negative-label range was hard-coded to 48 classes
        # (randint(0, 47)), which breaks for variants such as SHREC13;
        # getattr keeps old pickled/bare instances working
        num_classes = getattr(self, 'classes', 48)
        for a in anchors_label:
            # positive sample: another view group of the anchor's class
            p_label = a.item()
            sample, _ = self.loader(random.choice(self.mapping[p_label]), self.transform)
            pos_samples.append(sample)
            # negative sample: a view group of any other class
            n_label = random.randint(0, num_classes - 1)
            while n_label == p_label:
                n_label = random.randint(0, num_classes - 1)
            sample, _ = self.loader(random.choice(self.mapping[n_label]), self.transform)
            neg_samples.append(sample)
        return torch.stack(pos_samples, 0), torch.stack(neg_samples, 0)


class FeatureDataset(Dataset):
    '''Generate processed features dataset

    Attributes:
        path:
            Path of features file.
        split:
            Partition of data sets.
        mapping:
            label -> list of feature tensors of that class.
        neg_mapping:
            label -> list of feature tensors of every OTHER class.

    '''
    def __init__(self, path: str, split: str = 'train'):
        self.feature, self.ids, self.label = [], [], []
        self.mapping, self.neg_mapping = {}, {}
        # features file layout: {split: {class: [{id: tensor}, ...]}}
        features = torch.load(path)
        for c, samples in features[split].items():
            for s in samples:
                for sample_id, f in s.items():
                    _label = int(c)
                    _f = f.squeeze().to('cpu')
                    self.feature.append(_f)
                    self.ids.append(sample_id)
                    self.label.append(_label)
                    self.mapping.setdefault(_label, []).append(_f)
        # bug fix: class count was hard-coded to range(48); derive it from
        # the labels actually present so other dataset variants work
        num_classes = max(self.mapping, default=-1) + 1
        for neg_label in range(num_classes):
            self.neg_mapping[neg_label] = []
            for pos_label in self.mapping:
                if neg_label != pos_label:
                    self.neg_mapping[neg_label].extend(self.mapping[pos_label])

    def __getitem__(self, index):
        """Return (feature tensor, label, id) at position `index`."""
        return self.feature[index], self.label[index], self.ids[index]

    def __len__(self):
        return len(self.feature)

    def get_contrast_sample(self, anchors_label: torch.tensor):
        """Sample one positive and one negative feature per anchor label.

        Args:
            anchors_label: 1-D tensor of integer class labels.

        Returns:
            Tuple (pos_samples, neg_samples), each stacked along dim 0.
        """
        pos_samples, neg_samples = [], []
        # bug fix: negatives were drawn with randint(0, 47), which both
        # hard-coded 48 classes and raised KeyError for labels with no
        # samples; draw only from labels that exist in the mapping
        candidate_labels = list(self.mapping)
        for a in anchors_label:
            p_label = a.item()
            pos_samples.append(random.choice(self.mapping[p_label]))
            n_label = random.choice(candidate_labels)
            while n_label == p_label:
                n_label = random.choice(candidate_labels)
            neg_samples.append(random.choice(self.mapping[n_label]))
        return torch.stack(pos_samples, 0), torch.stack(neg_samples, 0)


class SketchAndShapeViewsDataset(Dataset):
    '''Combined dataset of sketches and 3D shape view edge maps.

    Each item carries a domain flag: 0 for sketches, 1 for shape views.

    Attributes:
        sk_path:
            Path of the sketch index file.
        cad_path:
            Path of the shape-views index file.
        split:
            Partition of data sets.
        transform:
            Transformation applied to the image.
        loader:
            Loader of each image.

    '''
    def __init__(self,
                 sk_path: str = sk_load,
                 cad_path: str = cad_load,
                 split: str = 'train',
                 transform: transforms = None,
                 loader=default_sk_loader):
        self.imgs = []
        self.transform = transform
        self.loader = loader
        # --- sketch data (domain flag 0) ---
        # index column of the .hdf5 frame holds the file paths
        sk_pd = pd.read_hdf(sk_path, 'sk')
        categories = sorted(set(sk_pd['cat']))
        self.classes = len(categories)
        label_map = {cat: idx for idx, cat in enumerate(categories)}
        self.sk_mapping = {idx: [] for idx in range(self.classes)}
        for path, row in sk_pd.iterrows():
            if row['split'] != split:
                continue
            lab = label_map[row['cat']]
            self.imgs.append((path, lab, 0))
            self.sk_mapping[lab].append(path)
        # --- 3D model views data (domain flag 1) ---
        cad_pd = pd.read_hdf(cad_path, 'cad')
        self.cad_mapping = {idx: [] for idx in range(self.classes)}
        for path, row in cad_pd.iterrows():
            if row['split'] != split:
                continue
            lab = label_map[row['cat']]
            self.imgs.append((path, lab, 1))
            self.cad_mapping[lab].append(path)

    def __getitem__(self, index):
        """Return (image, label, domain) for one item."""
        path, lab, domain = self.imgs[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        return image, lab, domain

    def __len__(self):
        return len(self.imgs)

    def get_contrast_sample(self, anchors_label: torch.tensor, domains: torch.tensor):
        """Draw one positive and one negative sample per anchor.

        Args:
            anchors_label: 1-D tensor of integer class labels.
            domains: 1-D tensor of domain flags (0 sketch, 1 shape).

        Returns:
            Tuple (pos_samples, neg_samples), each stacked along dim 0.
        """
        pos_samples, neg_samples = [], []
        for lab, dom in zip(anchors_label, domains):
            anchor_label = lab.item()
            anchor_domain = dom.item()
            # positive: same class as the anchor
            pos_samples.append(self.get_single_contrast_sample(anchor_label, anchor_domain))
            # negative: any class other than the anchor's
            other = random.randint(0, self.classes - 1)
            while other == anchor_label:
                other = random.randint(0, self.classes - 1)
            neg_samples.append(self.get_single_contrast_sample(other, anchor_domain))
        return torch.stack(pos_samples, 0), torch.stack(neg_samples, 0)

    def get_single_contrast_sample(self, label: int, sample_domain: int):
        """Load one sample of `label` from the domain opposite to the anchor's."""
        # sketch anchors (0) draw from shape views, and vice versa
        source = self.cad_mapping if sample_domain == 0 else self.sk_mapping
        sample = self.loader(random.choice(source[label]))
        if self.transform is not None:
            sample = self.transform(sample)
        return sample


if __name__ == '__main__':
    # Smoke tests: instantiate each dataset, pull one batch, and print the
    # resulting shapes.  The commented sections exercise the other dataset
    # classes and can be re-enabled as needed.
    # load sketches and 3D shape views data
    # train_data = SketchAndShapeViewsDataset(sk_path=sk_load,
    #                                         cad_path=cad_load,
    #                                         transform=transforms.ToTensor(),
    #                                         split='train')
    # test_data = SketchAndShapeViewsDataset(sk_path=sk_load,
    #                                        cad_path=cad_load,
    #                                        transform=transforms.ToTensor(),
    #                                        split='test')
    # train_loader = DataLoader(dataset=train_data, batch_size=64, shuffle=True)
    # test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True)
    # for i, data in enumerate(train_loader):
    #     sk, label, domain = data
    #     # data shape: torch.Size([64, 1, 224, 224])
    #     # data label shape: torch.Size([64])
    #     print('data shape:', sk.shape, 'data label shape:', label.shape, 'domain: ', domain)
    #     pos_samples, neg_samples = train_data.get_contrast_sample(label, domain)
    #     print(pos_samples.shape, neg_samples.shape)
    #     break
    # print('train data[0]:', train_data[0])
    # print('train data length:', len(train_data))  # 8550 + 7259 x 12 = 95658
    # print('test data length:', len(test_data))  # 5130 + 1728 x 12  = 25866

    # load sketch data
    sk_train_data = SketchDataset(path=sk_load, transform=transforms.ToTensor(), split='train')
    sk_test_data = SketchDataset(path=sk_load, transform=transforms.ToTensor(), split='test')
    sk_train_loader = DataLoader(dataset=sk_train_data, batch_size=64, shuffle=True)
    sk_test_loader = DataLoader(dataset=sk_test_data, batch_size=64, shuffle=True)
    # inspect only the first batch, then stop
    for i, data in enumerate(sk_train_loader):
        sk, label, id = data
        # sk shape: torch.Size([64, 3, 224, 224])
        # sk label shape: torch.Size([64])
        print('sk shape:', sk.shape, 'sk label shape:', label.shape, 'id: ', id)
        pos_samples, neg_samples = sk_train_data.get_contrast_sample(label)
        print(pos_samples.shape, neg_samples.shape)
        break
    print('sketch train data[0]:', sk_train_data[0])
    print('sketch train data length:', len(sk_train_data))  # 2400
    print('sketch test data length:', len(sk_test_data))  # 1440

    # load 3D-views data
    cad_train_data = ModelViewDataset(path=cad_load, transform=transforms.ToTensor(), split='train')
    cad_test_data = ModelViewDataset(path=cad_load, transform=transforms.ToTensor(), split='test')
    cad_train_loader = DataLoader(dataset=cad_train_data, batch_size=64, shuffle=True)
    cad_test_loader = DataLoader(dataset=cad_test_data, batch_size=64, shuffle=True)
    # inspect only the first batch, then stop
    for i, data in enumerate(cad_train_loader):
        x, label, id = data
        print(id)
        # x shape: torch.Size([64, 12, 3, 224, 224])
        # x label shape: torch.Size([64])
        print('views shape:', x.shape, 'views label shape:', label.shape, 'len(id):', len(id))

        # c: 3, h: 224, w: 224
        c, h, w = x.size()[-3:]
        # flatten the (batch, views) dims into one so each view is a sample
        # x view: torch.Size([768, 1, 224, 224])
        x = x.view(-1, c, h, w)
        print('c, h, w:', c, h, w)
        print('x view:', x.shape)
        batch = int(x.size(0))

        # split each model's 12 views into two halves of 6 views each
        x0_index = (torch.arange(batch) % 12 < 6)
        x1_index = (torch.arange(batch) % 12 >= 6)

        # x0.shape: torch.Size([384, 1, 224, 224]) x1.shape: torch.Size([384, 1, 224, 224])
        x0 = x[x0_index]
        x1 = x[x1_index]
        print('x0.shape:', x0.shape, 'x1.shape:', x1.shape)

        # get_contrast_sample
        pos, neg = cad_train_data.get_contrast_sample(label)
        pos = pos.view(-1, 1, 224, 224)
        neg = neg.view(-1, 1, 224, 224)
        print('get_contrast_sample: ', pos.shape, neg.shape)
        break
    print('3d-model-views train data[0]:', cad_train_data[0])
    print('3d-model-views train data length:', len(cad_train_data))  # 5812/7259/1042
    print('3d-model-views test data length:', len(cad_test_data))  # 1426/1728/216

    # sketch & shape features
    # NOTE(review): feature_path is a list of two paths, but FeatureDataset
    # expects a single path — confirm before re-enabling this section.
    # feature_train_data = FeatureDataset(feature_path, 'train')
    # feature_test_data = FeatureDataset(feature_path, 'test')
    # feature_train_loader = DataLoader(dataset=feature_train_data, batch_size=64, shuffle=True)
    # feature_test_loader = DataLoader(dataset=feature_test_data, batch_size=64, shuffle=True)
    # for i, data in enumerate(feature_train_loader):
    #     # f shape: torch.Size([64, 1, 2048])
    #     # label shape: torch.Size([64])
    #     f, label, id = data
    #     print('feature shape:', f.shape, 'feature label shape:', label.shape, 'len(id):', len(id))
    #     break
    # print('feature train data[0]:', feature_train_data[0])
    # print('feature train data length:', len(feature_train_data))  # 2400 + 5812 = 8212
    # print('feature test data length:', len(feature_test_data))  # 1440 + 1426 = 2866

    # -----------------ready the dataset with lightly--------------------------
    # import lightly.data as data
    # views_dir = os.path.join('.', 'data', 'views')
    # dataset_cad = data.LightlyDataset(input_dir=views_dir)
    # dataset_sk = data.LightlyDataset(input_dir=sk_dir)
    # for i in range(80):
    #     sample, target, fname = dataset_cad[i]
    #     print(sample, target, fname, len(dataset_cad))