from scipy import io, spatial
import os
from glob import glob
from os.path import join

import numpy as np
import torch
from PIL import Image
from sklearn import preprocessing
from torch import nn
from torch.utils.data import Dataset
from torchvision import models
from torchvision.transforms import transforms
from tqdm import tqdm

from common_utils.constants import Constants
from common_utils.utils import chunks, l2_normalize

# Run all model work on the GPU when one is available, else fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class MyDataset(Dataset):
    '''
    Zero-shot-learning dataset built from precomputed ResNet-101 features.

    Loads ``res101.t`` (per-image features and labels) and ``att_splits.t``
    (train/val/test image indices plus the class-attribute matrix) from
    ``data_dir`` and exposes the split selected by ``phase``.

    Inputs
        data_dir String root directory of the dataset files
        dataset String dataset name (stored for bookkeeping)
        phase String train, val, test
        model String backbone identifier (used by generate_features)
        update_features Bool callers may use this to trigger re-extraction
    '''

    def __init__(self, data_dir, dataset, phase, model, update_features=False):
        self.data_dir = data_dir
        self.dataset = dataset
        self.phase = phase
        self.update_features = update_features

        res101 = torch.load(join(data_dir, 'res101.t'))
        att_splits = torch.load(join(data_dir, 'att_splits.t'))
        train_loc = 'train_loc'
        val_loc = 'val_loc'
        test_loc = 'test_unseen_loc'

        # Stack the stored per-image feature vectors into one (N, D) tensor.
        feat = torch.stack(res101['features'], dim=0)
        self.feat_dim = feat[0].shape[0]

        # Slice each split out by its stored image indices.  The slices are
        # already tensors, so convert dtype with .to() instead of re-wrapping
        # with torch.tensor() (which copies and warns on tensor input).
        self.train_data = feat[np.squeeze(att_splits[train_loc])].to(torch.float32)
        self.val_data = feat[np.squeeze(att_splits[val_loc])].to(torch.float32)
        self.test_data = feat[np.squeeze(att_splits[test_loc])].to(torch.float32)
        # BUGFIX: report the split sizes (dim 0).  The original printed
        # shape[1], which with the (N, D) layout is the feature dimension.
        print('Tr:{}; Val:{}; Ts:{}\n'.format(
            self.train_data.shape[0], self.val_data.shape[0], self.test_data.shape[0]))

        labels = np.array(res101['labels'])
        # Raw class id of every image in each split.
        self.labels_train = np.squeeze(labels[np.squeeze(att_splits[train_loc])])
        self.labels_val = np.squeeze(labels[np.squeeze(att_splits[val_loc])])
        self.labels_test = np.squeeze(labels[np.squeeze(att_splits[test_loc])])

        # np.unique returns the sorted distinct class ids of each split.
        self.train_classes = np.unique(self.labels_train)
        self.val_classes = np.unique(self.labels_val)
        self.test_classes = np.unique(self.labels_test)

        # Remap raw class ids to contiguous 0..K-1 indices.  searchsorted on
        # the sorted unique array yields the same mapping as the original
        # sequential relabelling loops, without in-place mutation.
        self.labels_train = np.searchsorted(self.train_classes, self.labels_train)
        self.labels_val = np.searchsorted(self.val_classes, self.labels_val)
        self.labels_test = np.searchsorted(self.test_classes, self.labels_test)

        # Normalised class-attribute matrix, one row per class
        # (e.g. 50 classes x 85 attributes for AwA).
        self.sig = att_splits['att']
        self.train_attrs = torch.tensor(self.sig[self.train_classes], dtype=torch.float32)
        self.val_attrs = torch.tensor(self.sig[self.val_classes], dtype=torch.float32)
        self.test_attrs = torch.tensor(self.sig[self.test_classes], dtype=torch.float32)

        # Select the active split for __getitem__/__len__.
        if self.phase == 'train':
            self.data = self.train_data
            self.attr = self.train_attrs
        elif self.phase == 'val':
            self.data = self.val_data
            self.attr = self.val_attrs
        else:
            self.data = self.test_data
            self.attr = self.test_attrs

    def __getitem__(self, index):
        '''Return (L2-normalised feature, class index, class attribute vector).'''
        img = self.data[index]
        if self.phase == 'train':
            split_labels, split_attrs = self.labels_train, self.train_attrs
        elif self.phase == 'val':
            split_labels, split_attrs = self.labels_val, self.val_attrs
        else:
            split_labels, split_attrs = self.labels_test, self.test_attrs
        label = torch.tensor(split_labels[index], dtype=torch.long)
        # The attribute matrix is already a float tensor; clone the row
        # instead of torch.tensor() to avoid the copy-construct warning.
        attr_id = split_attrs[label].clone()
        img = l2_normalize(img, dim=0)
        return img, label, attr_id

    def __len__(self):
        return len(self.data)

    def get_attrs(self, curr_classes):
        '''Load and z-score-normalise the attribute rows for ``curr_classes``.

        Rows are selected by matching class names in ``curr_classes`` against
        the full ordered class list from classes.txt.
        '''
        filename = 'predicate-matrix-continuous.txt'
        # filename = 'predicate-matrix-binary.txt'
        with open(join(self.data_dir, filename)) as f:
            lines = f.read().strip().split('\n')
            attrs = [list(map(float, line.split())) for line in lines]
        attrs = torch.tensor(attrs)
        # BUGFIX: the original referenced ``self.classes``, which is never
        # defined anywhere in this class.  Derive the ordered class-name list
        # from classes.txt instead.
        # NOTE(review): assumes the predicate-matrix rows follow the
        # classes.txt ordering (standard AwA layout) -- verify against data.
        all_classes, _, _ = self.get_cleasses_split()
        idx = [i for i, clz in enumerate(all_classes) if clz in curr_classes]
        curr_attrs = attrs[idx]
        curr_attrs = x_normalized(curr_attrs, dim=1)
        return curr_attrs

    def get_cleasses_split(self):
        '''Read the full/train/test class-name lists from their text files.

        Returns (classes, train_classes, test_classes) as lists of names.
        '''
        classesFileName = 'classes.txt'
        train_fileName = 'trainclasses.txt'
        test_fileName = 'testclasses.txt'
        # classes.txt lines are "<index>\t<name>"; keep only the name.
        with open(join(self.data_dir, classesFileName), 'r') as f:
            classes = [p.split('\t')[1].strip() for p in f.read().strip().split('\n')]
        with open(join(self.data_dir, train_fileName), 'r') as f:
            train_classes = f.read().strip().split('\n')
        with open(join(self.data_dir, test_fileName), 'r') as f:
            test_classes = f.read().strip().split('\n')
        return classes, train_classes, test_classes

    def getDataInfo(self, classes):
        '''List (relative image path, class name) pairs for the given classes.'''
        imageImageRootPath = join(self.data_dir, "JPEGImages")
        files_all = []
        labels = []
        for clz in classes:
            for current in glob(join(imageImageRootPath, clz, '*.jpg'), recursive=True):
                # Keep only "<class>/<file>.jpg", normalising Windows separators.
                parts = current.replace('\\', '/').split('/')
                files_all.append(join(parts[-2], parts[-1]))
                labels.append(clz)
        return list(zip(files_all, labels))

    def generate_features(self, out_file, model):
        '''Extract CNN features for every JPEG under JPEGImages and save them.

        Saves {Constants.FEATURES: tensor, Constants.FILES: paths} to out_file.
        '''
        dataPath = join(self.data_dir, 'JPEGImages')
        # Collect all images recursively as "<class>/<file>.jpg" relative paths.
        files_all = []
        for current in glob(join(dataPath, '**', '*.jpg'), recursive=True):
            parts = current.replace('\\', '/').split('/')
            files_all.append(join(parts[-2], parts[-1]))
        imageExtractor = get_image_extractor(arch=model, feature_dim=1024).eval().to(device)
        # BUGFIX: ``self.imageLoader`` was never defined; use an ImageLoader
        # rooted at the image directory to open each relative path.
        loader = ImageLoader(dataPath)
        transform = self.dataset_transform(self.phase)
        image_feats = []
        image_files = []
        for files in tqdm(chunks(files_all, 512), total=len(files_all) // 512,
                          desc=f'EXTRACTING features {model}'):
            imgs = [transform(loader(f)) for f in files]
            # No gradients needed for feature extraction.
            with torch.no_grad():
                feats = imageExtractor(torch.stack(imgs, 0).to(device))
            image_feats.append(feats.cpu())
            image_files += files
        image_feats = torch.cat(image_feats, 0)
        print('features for %d images generated' % (len(image_files)))
        torch.save({Constants.FEATURES: image_feats, Constants.FILES: image_files}, out_file)

    def dataset_transform(self, phase):
        '''
        Build the torchvision preprocessing pipeline for a phase.

        :param phase: String -- 'train', 'val', 'test' or 'all'
        :return: a torchvision transforms.Compose
        :raises ValueError: for any other phase
        '''
        # ImageNet channel statistics used by the pretrained backbones.
        mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        if phase == 'train':
            transform = transforms.Compose([
                # Random crop/scale + horizontal flip for train-time augmentation.
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                # HWC [0,255] -> CHW [0,1], then normalise.
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ])
        elif phase in ('val', 'test', 'all'):
            # Deterministic resize + centre crop for evaluation / extraction.
            transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ])
        else:
            raise ValueError('Invalid transform')
        return transform


def get_image_extractor(arch='resnet18', pretrained=True, feature_dim=None, checkpoint=''):
    '''
    Build a torchvision ResNet backbone with its final FC layer replaced.

    :param arch: Base architecture, 'resnet18' or 'resnet101'
    :param pretrained: Bool, load ImageNet weights
    :param feature_dim: Int output feature dimension; None keeps the pooled
        backbone features unchanged (FC becomes an identity)
    :param checkpoint: String, not implemented (kept for interface compatibility)
    :return: the configured nn.Module
    :raises ValueError: if ``arch`` is not a supported architecture
    '''
    if arch == 'resnet18':
        model = models.resnet18(pretrained=pretrained)
        in_features = 512
    elif arch == 'resnet101':
        model = models.resnet101(pretrained=pretrained)
        in_features = 2048
    else:
        # BUGFIX: the original fell through both ifs and raised
        # UnboundLocalError on ``return model`` for any other arch.
        raise ValueError(f'Unsupported architecture: {arch!r}')
    if feature_dim is None:
        model.fc = nn.Sequential()
    else:
        model.fc = nn.Linear(in_features, feature_dim)
    return model


class ImageLoader:
    '''Callable that opens images resolved against a fixed root directory.'''

    def __init__(self, root):
        # Directory that relative image paths are joined onto.
        self.root_dir = root

    def __call__(self, img):
        # Force RGB so any alpha channel is dropped before the transforms run.
        path = join(self.root_dir, img)
        return Image.open(path).convert('RGB')


def x_normalized(x: torch.Tensor, dim=0):
    """
    Z-score standardisation along ``dim``: x_normalized = (x - mean) / std.

    BUGFIX: the original used ``unsqueeze(-1)`` on the reduced statistics,
    which only broadcasts correctly when ``dim`` is the last axis; with
    ``dim=0`` on a 2-D tensor the subtraction misaligned.  ``keepdim=True``
    keeps the reduced axis in place so broadcasting works for any ``dim``.
    (Also removed the unused ``mean2``/``std2`` sanity computations.)
    """
    mean = torch.mean(x, dim=dim, keepdim=True)
    std = torch.std(x, dim=dim, keepdim=True)
    return (x - mean) / std