from os.path import join

import numpy as np
import torch
from PIL import Image
from scipy import io
from torch.utils.data import Dataset

# Default compute device for this module: CUDA when available, else CPU.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'


class Dataset(Dataset):
    """Zero-shot-learning dataset built from the xlsa17 split files.

    Loads pre-extracted ResNet-101 features (``res101.mat``) and the
    class/attribute splits (``att_splits.mat``), then exposes the subset
    selected by ``phase`` through ``__getitem__`` / ``__len__``.

    NOTE(review): the class name shadows ``torch.utils.data.Dataset``
    imported above. It still inherits from the torch Dataset (the base name
    is resolved before the ``class`` statement rebinds it), but a distinct
    name such as ``ZSLDataset`` would be clearer; kept for compatibility.

    Args:
        data_dir (str): dataset root directory.
        dataset (str): dataset name, e.g. 'AWA2' or 'CUB'.
        phase (str): one of 'train', 'val', 'test', 'trainval',
            'gzsql_test' or 'all'.
        image_root (str): root directory containing the raw images.
        transform: optional image transform; when set, ``__getitem__``
            also loads and transforms the raw image.
        xlsa17_dir (str): directory holding the xlsa17 ``.mat`` files.
    """

    def __init__(self, data_dir, dataset, phase, image_root, transform=None,
                 xlsa17_dir='E:datasets/xlsa17/data/'):
        self.data_dir = data_dir
        self.dataset = dataset
        self.phase = phase
        self.transform = transform
        self.image_root = image_root

        res101 = io.loadmat(join(xlsa17_dir, dataset, 'res101.mat'))
        att_splits = io.loadmat(join(xlsa17_dir, dataset, 'att_splits.mat'))

        # The .mat files hold 1-based MATLAB indices; shift them to 0-based.
        train_loc = np.squeeze(att_splits['train_loc'] - 1)              # CZSL train samples
        val_loc = np.squeeze(att_splits['val_loc'] - 1)                  # CZSL validation samples
        trainval_loc = np.squeeze(att_splits['trainval_loc'] - 1)        # GZSL train samples
        test_seen_loc = np.squeeze(att_splits['test_seen_loc'] - 1)      # GZSL seen-class test samples
        test_unseen_loc = np.squeeze(att_splits['test_unseen_loc'] - 1)  # GZSL unseen-class test samples

        # Image paths relative to the dataset's image directory
        # (AWA2 stores its images under 'JPEGImages/', the others under 'images/').
        image_files = res101['image_files']
        if dataset == 'AWA2':
            image_files = np.array([im_f[0][0].split('JPEGImages/')[-1] for im_f in image_files])
        else:
            image_files = np.array([im_f[0][0].split('images/')[-1] for im_f in image_files])

        # Pre-extracted features, one row per image.
        feat = res101['features'].T
        self.feat_dim = feat.shape[1]
        self.train_feat = feat[train_loc]
        self.val_feat = feat[val_loc]
        self.trainval_feat = feat[trainval_loc]
        self.test_seen_feat = feat[test_seen_loc]
        self.test_unseen_feat = feat[test_unseen_loc]
        print('Tr:{}; Val:{}; Ts:{}\n'.format(self.train_feat.shape[0], self.val_feat.shape[0], self.test_unseen_feat.shape[0]))

        # Labels are stored 1-based in res101.mat and are kept 1-based here;
        # subtract 1 whenever they index the 0-based attribute matrix ``sig``.
        labels = res101['labels']
        self.labels_all = torch.from_numpy(labels)
        self.labels_train = torch.from_numpy(labels[train_loc]).squeeze().long()
        self.labels_val = torch.from_numpy(labels[val_loc]).squeeze().long()
        self.labels_trainval = torch.from_numpy(labels[trainval_loc]).squeeze().long()
        self.labels_test_seen = torch.from_numpy(labels[test_seen_loc]).squeeze().long()
        self.labels_test_unseen = torch.from_numpy(labels[test_unseen_loc]).squeeze().long()
        self.labels_test = torch.cat((self.labels_test_seen, self.labels_test_unseen), dim=0)

        # Sorted unique class ids (still 1-based) present in each split.
        self.train_classes_seen = np.unique(self.labels_train)
        self.val_classes_seen = np.unique(self.labels_val)
        self.trainval_classes_seen = np.unique(self.labels_trainval)
        self.test_classes_seen = np.unique(self.labels_test_seen)
        self.test_classes_unseen = np.unique(self.labels_test_unseen)
        self.test_classes = np.unique(self.labels_test)  # all classes of the dataset

        # Class-attribute matrix from att_splits, shape (num_classes, attr_dim).
        self.sig = torch.tensor(att_splits['att'].T, dtype=torch.float32)
        self.attr_dim = self.sig.shape[1]
        # Per-split attribute rows; ``class id - 1`` converts to 0-based rows.
        self.train_attrs = self.sig[(self.train_classes_seen - 1).tolist()]
        self.val_attrs = self.sig[(self.val_classes_seen - 1).tolist()]
        self.trainval_attrs = self.sig[(self.trainval_classes_seen - 1).tolist()]
        self.test_attrs_seen = self.sig[(self.test_classes_seen - 1).tolist()]
        self.test_attrs_unseen = self.sig[(self.test_classes_unseen - 1).tolist()]
        self.test_attrs = self.sig[(self.test_classes - 1).tolist()]

        # Select the subset served by __getitem__ for the requested phase.
        if self.phase == 'train':
            self.feats = self.train_feat
            self.labels = self.labels_train
            self.attrs = self.train_attrs
            # BUGFIX: image paths must be indexed by sample location, not by
            # label value (the original used image_files[self.labels_train]).
            self.image_files = image_files[train_loc]
        elif self.phase == 'val':
            self.feats = self.val_feat
            self.labels = self.labels_val
            self.attrs = self.val_attrs
            # BUGFIX: image_files was previously unset outside 'train',
            # crashing __getitem__ whenever a transform was supplied.
            self.image_files = image_files[val_loc]
        elif self.phase == 'test':
            self.feats = self.test_unseen_feat
            self.labels = self.labels_test_unseen
            self.attrs = self.test_attrs_unseen
            self.image_files = image_files[test_unseen_loc]
        elif self.phase == 'trainval':
            self.feats = self.trainval_feat
            self.labels = self.labels_trainval
            self.attrs = self.trainval_attrs
            self.image_files = image_files[trainval_loc]
        elif self.phase == 'gzsql_test':
            # BUGFIX: the feature blocks are numpy arrays, so the original
            # ``torch.cat`` call raised a TypeError; concatenate with numpy.
            self.feats = np.concatenate((self.test_unseen_feat, self.test_seen_feat), axis=0)
            self.labels = torch.cat((self.labels_test_unseen, self.labels_test_seen), dim=0)
            self.attrs = self.sig
            self.image_files = np.concatenate((image_files[test_unseen_loc], image_files[test_seen_loc]), axis=0)
        elif self.phase == 'all':
            # BUGFIX: '+' added the feature matrices elementwise (or raised on
            # shape mismatch); stack the splits and align the labels with them
            # (the original paired these feats with the full raw label array).
            self.feats = np.concatenate((self.train_feat, self.val_feat, self.test_unseen_feat), axis=0)
            self.labels = torch.cat((self.labels_train, self.labels_val, self.labels_test_unseen), dim=0)
            self.attrs = self.sig
            self.image_files = np.concatenate((image_files[train_loc], image_files[val_loc], image_files[test_unseen_loc]), axis=0)
        else:
            # BUGFIX: ``raise NotImplemented`` raises a TypeError because
            # NotImplemented is a sentinel value, not an exception class.
            raise NotImplementedError('unknown phase: {}'.format(self.phase))

        # Long-tail statistics: sample count per seen class (debugging aid;
        # previously computed into a local and discarded).
        self.class_counts = {int(clz): int((self.labels_all == clz).sum())
                             for clz in self.trainval_classes_seen}

    def __getitem__(self, index):
        """Return ``(feature, label, attribute)`` for sample ``index``.

        When a transform is configured the raw image is loaded and
        transformed as well.
        NOTE(review): the transformed image is computed but not returned,
        matching the original 3-tuple interface; extend the return value if
        callers ever need the image.
        """
        if self.transform is not None:
            img_pil = Image.open(join(self.image_root, self.image_files[index])).convert("RGB")
            img = self.transform(img_pil)
        else:
            img = torch.Tensor()
        label = self.labels[index]
        # BUGFIX: labels are 1-based while ``sig`` rows are 0-based — the
        # original ``self.sig[label]`` returned the attributes of class
        # ``label + 1`` (and raised IndexError for the last class).
        attr = self.sig[label - 1]
        feat = self.feats[index]
        return feat, label, attr

    def __len__(self):
        """Number of samples in the active phase subset."""
        return len(self.feats)