import os
from glob import glob
from os.path import join

import torch
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
from torchvision import models
from torchvision.transforms import transforms
from tqdm import tqdm

from common_utils.constants import Constants
from common_utils.utils import chunks

# Prefer the GPU for feature extraction when one is present.
device = 'cpu'
if torch.cuda.is_available():
    device = 'cuda'


class MyDataset(Dataset):
    """Zero-shot-learning image dataset (AwA-style directory layout).

    Inputs
        data_dir String dataset root directory (contains JPEGImages, the
            class-split text files and the predicate matrix)
        dataset String dataset name (kept for bookkeeping)
        phase String 'train' or 'test'
        model String backbone name used to locate/generate the cached feature file
        update_features Bool load and transform raw images on the fly instead of
            reading pre-extracted backbone features
        isGzsl Bool use all classes (generalized ZSL) instead of the phase split
    """

    def __init__(self, data_dir, dataset, phase, model, update_features=False, isGzsl=False):
        self.data_dir = data_dir
        self.dataset = dataset
        self.phase = phase
        self.update_features = update_features
        self.imageLoader = ImageLoader(join(self.data_dir, "JPEGImages"))

        if not self.update_features:
            feat_file = join(self.data_dir, model + '_' + self.phase + '_features.t')
            print(f'Using {model} and feature file {feat_file}')
            if not os.path.exists(feat_file):
                # Extraction is pure inference; no_grad avoids autograd overhead.
                with torch.no_grad():
                    self.generate_features(feat_file, model)
            activation_data = torch.load(feat_file)
            # Map relative image path ('<class>/<file>.jpg') -> cached feature row.
            self.activations = dict(
                zip(activation_data[Constants.FILES], activation_data[Constants.FEATURES])
            )
            self.feat_dim = activation_data[Constants.FEATURES].size(1)
        classes, train_classes, test_classes = self.get_cleasses_split()
        self.attrs = self.get_attrs()
        self.train_data = self.getDataInfo(train_classes)
        self.test_data = self.getDataInfo(test_classes)
        self.all_data = self.getDataInfo(classes)
        if self.phase == 'train':
            self.data = self.all_data if isGzsl else self.train_data
        elif self.phase == 'test':
            self.data = self.all_data if isGzsl else self.test_data
        else:
            raise ValueError("phase not defined")
        self.classes = classes

    def __getitem__(self, index):
        """Return (features-or-image, class-index label, {-1,+1} attribute vector)."""
        imagePath, label = self.data[index]
        label = torch.tensor(self.classes.index(label), dtype=torch.long)
        # Clone first: indexing a row yields a view, and the original in-place
        # {0 -> -1} remap silently mutated the shared attribute matrix.
        attr = self.attrs[label].clone()
        attr[attr == 0] = -1
        if not self.update_features:
            img = self.activations[imagePath]
        else:
            img = self.imageLoader(imagePath)
            # Bug fix: dataset_transform expects a phase string and returns a
            # transform pipeline; the original passed the image itself, which
            # always raised ValueError('Invalid transform').
            img = self.dataset_transform(self.phase)(img)
        return img, label, attr

    def __len__(self):
        return len(self.data)

    def get_attrs(self):
        """Load the binary class/attribute (predicate) matrix as a float tensor."""
        filename = 'predicate-matrix-binary.txt'
        with open(join(self.data_dir, filename)) as f:
            lines = f.read().strip().split('\n')
        # One row per class, one column per binary predicate.
        return torch.tensor([list(map(float, line.split())) for line in lines])

    def get_cleasses_split(self):
        """Return (all_classes, train_classes, test_classes) from the split files.

        NOTE: the method name keeps its historical 'cleasses' typo so that
        existing callers keep working.
        """
        with open(join(self.data_dir, 'classes.txt'), 'r') as f:
            # classes.txt lines look like '<index>\t<class name>'.
            classes = [p.split('\t')[1].strip() for p in f.read().strip().split('\n')]
        with open(join(self.data_dir, 'trainclasses.txt'), 'r') as f:
            train_classes = f.read().strip().split('\n')
        with open(join(self.data_dir, 'testclasses.txt'), 'r') as f:
            test_classes = f.read().strip().split('\n')
        return classes, train_classes, test_classes

    def getDataInfo(self, classes):
        """Collect (relative image path, class name) pairs for the given classes."""
        imageRootPath = join(self.data_dir, "JPEGImages")
        data = []
        for clz in classes:
            for current in glob(join(imageRootPath, clz, '*.jpg'), recursive=True):
                parts = current.replace('\\', '/').split('/')
                # Store '<class>/<file>.jpg' so keys match the feature cache.
                data.append((join(parts[-2], parts[-1]), clz))
        return data

    def generate_features(self, out_file, model):
        """Extract backbone features for every JPEG and save them to out_file.

        Saves a dict {Constants.FEATURES: FloatTensor[N, feat_dim],
        Constants.FILES: list of '<class>/<file>.jpg' relative paths}.
        """
        dataPath = join(self.data_dir, 'JPEGImages')
        files_all = []
        # Recursive glob over all class sub-directories.
        for current in glob(join(dataPath, '**', '*.jpg'), recursive=True):
            parts = current.replace('\\', '/').split('/')
            files_all.append(join(parts[-2], parts[-1]))
        imageExtractor = get_image_extractor(arch=model, feature_dim=1024).eval().to(device)
        transform = self.dataset_transform(self.phase)
        image_feats = []
        image_files = []
        for files in tqdm(chunks(files_all, 512), total=len(files_all) // 512, desc=f'EXTRACTING features {model}'):
            imgs = [transform(self.imageLoader(file)) for file in files]
            feats = imageExtractor(torch.stack(imgs, 0).to(device))
            image_feats.append(feats.data.cpu())
            image_files += files
        image_feats = torch.cat(image_feats, 0)
        print('features for %d images generated' % (len(image_files)))
        torch.save({Constants.FEATURES: image_feats, Constants.FILES: image_files}, out_file)

    def dataset_transform(self, phase):
        """Return the torchvision transform pipeline for the given phase.

        :param phase: String, one of 'train', 'val', 'test' or 'all'
        :return: a transforms.Compose pipeline
        :raises ValueError: for any other phase value
        """
        mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        if phase == 'train':
            return transforms.Compose([
                transforms.RandomResizedCrop(224),  # random crop/scale/ratio, then resize to 224
                transforms.RandomHorizontalFlip(),  # horizontal flip with p=0.5 (original comment wrongly said vertical)
                transforms.ToTensor(),  # HWC uint8 [0,255] -> CHW float [0,1]
                transforms.Normalize(mean, std)  # ImageNet statistics
            ])
        # 'val', 'test' and 'all' share the same deterministic pipeline.
        if phase in ('val', 'test', 'all'):
            return transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ])
        raise ValueError('Invalid transform')


def get_image_extractor(arch='resnet18', pretrained=True, feature_dim=None, checkpoint=''):
    '''
    Build a torchvision ResNet backbone used as an image feature extractor.

    :param arch: Base architecture, 'resnet18' or 'resnet101'
    :param pretrained: Bool, load ImageNet weights
    :param feature_dim: Int output feature dimension; when None the final fc
        layer is removed (empty Sequential acts as identity) so the pooled
        backbone features are returned directly
    :param checkpoint: String, not implemented
    :return: torch.nn.Module
    :raises ValueError: for an unsupported arch (the original fell through
        both branches and crashed with UnboundLocalError on `return model`)
    '''
    if arch == 'resnet18':
        model = models.resnet18(pretrained=pretrained)
        in_features = 512
    elif arch == 'resnet101':
        model = models.resnet101(pretrained=pretrained)
        in_features = 2048
    else:
        raise ValueError(f'Unsupported architecture: {arch!r}')
    # Identity head keeps raw pooled features; otherwise project to feature_dim.
    model.fc = nn.Sequential() if feature_dim is None else nn.Linear(in_features, feature_dim)
    return model


class ImageLoader:
    """Callable that opens images below a fixed root directory as RGB PIL images."""

    def __init__(self, root):
        # Directory that all relative image paths are resolved against.
        self.root_dir = root

    def __call__(self, img):
        full_path = join(self.root_dir, img)
        # Force RGB so any alpha channel is discarded before the transforms.
        return Image.open(full_path).convert('RGB')