import os
from glob import glob
from os.path import join

import numpy as np
import torch
from PIL import Image
from torch import nn
from torchvision import models
from torchvision.transforms import transforms
from tqdm import tqdm

from common_utils.constants import Constants
from common_utils.utils import chunks, standardScaler

# Module-wide compute device string: prefer CUDA when available, else CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class FeatureExtractor:
    """
    Extracts deep CNN features for an image dataset and builds the files
    used by zero-shot-learning pipelines:

    * ``res101.t``     — features, relative file paths and integer labels.
    * ``att_splits.t`` — class attributes (raw and standardized) plus
      train/val/test sample-index splits.
    """

    def __init__(self, data_dir, image_root, classes_path, model, train_classes_path,
                 test_classes_path, val_classes_path=None):
        """
        :param data_dir: Directory holding the dataset metadata files and
            where the generated ``.t`` files are saved.
        :param image_root: Root directory with one sub-directory of JPEG
            images per class.
        :param classes_path: Path to ``classes.txt`` (``index<TAB>name`` rows).
        :param model: Backbone architecture name, e.g. ``'resnet101'``.
        :param train_classes_path: Text file listing training class names.
        :param test_classes_path: Text file listing test class names.
        :param val_classes_path: Optional text file listing validation class
            names; when omitted, ~20% of the training classes are held out.
        """
        self.data_dir = data_dir
        self.model = model
        # normalize Windows separators so later prefix-stripping works
        self.image_root = image_root.replace("\\", "/")
        self.classes_path = classes_path
        self.train_classes_path = train_classes_path
        self.test_classes_path = test_classes_path
        self.val_classes_path = val_classes_path

        self.imageLoader = ImageLoader(root=image_root)

        self.feat_file = join(data_dir, "res101.t")  # feature file save path
        self.att_splits_file = join(data_dir, "att_splits.t")  # attribute-split file save path

        self.allclasses_names, self.train_classes_names, self.test_classes_names, self.val_classes_names \
            = self.read_classes(classes_path, train_classes_path, test_classes_path, val_classes_path)

    def execute(self):
        """Generate (or reload) per-image features, then build and save the
        attribute/split file."""
        # BUG FIX: torch.load never returns None — it raises when the file
        # is missing. Probe for the file instead, so a fresh run actually
        # reaches generate_features().
        if os.path.exists(self.feat_file):
            # Checkpoint layout: {Constants.FEATURES, Constants.FILES, Constants.LABELS}
            checkpoint = torch.load(self.feat_file, map_location='cpu')
            labels = checkpoint[Constants.LABELS]
        else:
            image_feats, image_files, labels = self.generate_features(self.model)
        train_loc, val_loc, test_loc = self.generate_att_splits(labels)

    def generate_att_splits(self, labels):
        """
        Compute train/val/test sample indices from per-image labels and save
        them together with the raw and standardized class attributes.

        :param labels: Sequence of integer class indices, one per image.
        :returns: ``(train_loc, val_loc, test_loc)`` index arrays.
        """
        # Map each class-name subset to integer class indices.
        train_classes_labels = np.argwhere(np.isin(self.allclasses_names, self.train_classes_names))
        test_classes_labels = np.argwhere(np.isin(self.allclasses_names, self.test_classes_names))
        val_classes_labels = np.argwhere(np.isin(self.allclasses_names, self.val_classes_names))
        original_att = self.read_origin_attrs(self.allclasses_names)
        att = standardScaler(original_att, dim=1)  # standardized attributes
        # Indices of samples whose label falls in each class subset.
        train_loc = np.argwhere(np.isin(labels, train_classes_labels))
        val_loc = np.argwhere(np.isin(labels, val_classes_labels))
        test_loc = np.argwhere(np.isin(labels, test_classes_labels))
        torch.save({Constants.ORIGINAL_ATT: original_att, Constants.ATT: att,
                    Constants.TRAIN_LOC: train_loc, Constants.VAL_LOC: val_loc,
                    Constants.TEST_LOC: test_loc}, self.att_splits_file)
        return train_loc, val_loc, test_loc

    def read_origin_attrs(self, curr_classes):
        """
        Load the continuous class-attribute matrix (one whitespace-separated
        row of floats per class).

        :param curr_classes: Currently unused; kept for interface stability.
        :returns: ``torch.Tensor`` of shape (num_classes, num_attributes).
        """
        filename = 'predicate-matrix-continuous.txt'  # alternative: 'predicate-matrix-binary.txt'
        with open(join(self.data_dir, filename)) as f:
            lines = f.read().strip().split('\n')
        attrs = [list(map(float, line.split())) for line in lines]
        return torch.tensor(attrs)

    def generate_features(self, model):
        """
        Run every ``*.jpg`` under ``image_root`` through the backbone and
        save features, relative file paths and integer labels to ``feat_file``.

        :param model: Architecture name passed to :meth:`get_image_extractor`.
        :returns: ``(image_feats, image_files, labels)`` with ``labels`` a
            numpy int array aligned with ``image_files``.
        """
        files_all = []
        image_path = join(self.image_root, '**', '*.jpg')
        # recursive glob: one sub-directory per class
        files_before = glob(image_path, recursive=True)
        for current in files_before:
            # store paths relative to image_root, with forward slashes
            parts = current.replace('\\', '/').replace(self.image_root + "/", "")
            files_all.append(parts)
        imageExtractor = self.get_image_extractor(arch=model, feature_dim=2048).eval().to(device)
        image_feats = []
        image_files = []
        labels = []
        transform = self.dataset_transform()
        length = 128  # batch size
        allclasses_names_list = self.allclasses_names.tolist()
        # Ceil division so the progress bar also covers the final partial batch.
        total = (len(files_all) + length - 1) // length
        for files in tqdm(chunks(files_all, length), total=total,
                          desc=f'EXTRACTING features {model}'):
            # The class name is the parent directory of each image file.
            cur_labels_str = [clz for clz, _ in map(os.path.split, files)]
            cur_labels = [allclasses_names_list.index(c) for c in cur_labels_str]
            labels.extend(cur_labels)
            imgs = [transform(self.imageLoader(f)) for f in files]
            # Inference only: no_grad avoids building autograd graphs.
            with torch.no_grad():
                feats = imageExtractor(torch.stack(imgs, 0).to(device))
            image_feats.extend(feats.cpu())
            image_files.extend(files)
        print('features for %d images generated' % (len(image_files)))
        torch.save({Constants.FEATURES: image_feats, Constants.FILES: image_files,
                    Constants.LABELS: labels}, self.feat_file)
        return image_feats, image_files, np.array(labels)

    def get_image_extractor(self, arch='resnet18', pretrained=True, feature_dim=None, checkpoint=''):
        '''
        Build a torchvision backbone whose classifier head is replaced so the
        network emits feature vectors.

        :param arch: Base architecture name.
        :param pretrained: Bool, load ImageNet weights.
        :param feature_dim: Int, output feature dimension; ``None`` keeps the
            penultimate-layer features (identity head).
        :param checkpoint: String, not implemented.
        :raises ValueError: If ``arch`` is not a supported architecture.
        :return: ``torch.nn.Module`` feature extractor.
        '''
        # BUG FIX: the original mixed a stand-alone `if` with an `if/elif`
        # chain and had no fallback, so an unknown arch died with
        # UnboundLocalError. One chain + explicit ValueError instead.
        if arch == 'resnet18':
            model = models.resnet18(pretrained=pretrained)
            self._replace_fc(model, 512, feature_dim)
        elif arch == 'resnet101':
            model = models.resnet101(pretrained=pretrained)
            self._replace_fc(model, 2048, feature_dim)
        elif arch == 'resnet50':
            model = models.resnet50(pretrained=pretrained)
            self._replace_fc(model, 2048, feature_dim)
        elif arch == 'resnet50_cutmix':
            model = models.resnet50(pretrained=pretrained)
            # NOTE(review): hard-coded local checkpoint path — fails on any
            # other machine; confirm whether this should be configurable.
            state = torch.load('/home/ubuntu/workspace/pretrained/resnet50_cutmix.tar')
            model.load_state_dict(state['state_dict'], strict=False)
            self._replace_fc(model, 2048, feature_dim)
        elif arch == 'resnet152':
            model = models.resnet152(pretrained=pretrained)
            self._replace_fc(model, 2048, feature_dim)
        elif arch == 'vgg16':
            model = models.vgg16(pretrained=pretrained)
            # Drop the last three classifier layers to expose 4096-d features.
            modules = list(model.classifier.children())[:-3]
            model.classifier = torch.nn.Sequential(*modules)
            if feature_dim is not None:
                model.classifier[3] = torch.nn.Linear(4096, feature_dim)
        else:
            raise ValueError(f'Unsupported architecture: {arch!r}')
        return model

    @staticmethod
    def _replace_fc(model, in_features, feature_dim):
        """Swap a ResNet's ``fc`` head: identity when ``feature_dim`` is
        None, otherwise a linear projection to ``feature_dim``."""
        model.fc = nn.Sequential() if feature_dim is None else nn.Linear(in_features, feature_dim)

    def dataset_transform(self, phase='all'):
        '''
        Build the torchvision preprocessing pipeline.

        :param phase: One of ``'train'``, ``'val'``, ``'test'``, ``'all'``.
        :raises ValueError: For any other phase.
        :return: ``transforms.Compose`` pipeline.
        '''
        # ImageNet statistics: mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
        mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        if phase == 'train':
            return transforms.Compose([
                transforms.RandomResizedCrop(224),  # random crop + rescale to 224x224
                transforms.RandomHorizontalFlip(),  # flip with default p=0.5
                transforms.ToTensor(),  # HWC uint8 [0,255] -> CHW float [0,1]
                transforms.Normalize(mean, std),
            ])
        # 'val', 'test' and 'all' share a deterministic center-crop pipeline.
        if phase in ('val', 'test', 'all'):
            return transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
        raise ValueError('Invalid transform')

    def read_classes(self, classes_path, train_classes_path, test_classes_path, val_classes_path=None):
        """
        Read the class-name lists.

        ``classes_path`` rows look like ``"<index>\\t<name>"``; the other
        files contain one class name per line.

        :returns: Tuple of numpy arrays
            ``(all_classes, train_classes, test_classes, val_classes)``.
        """
        with open(classes_path, 'r') as f:
            classes = [line.split('\t')[1].strip() for line in f.read().strip().split('\n')]
        with open(train_classes_path, 'r') as f:
            train_classes = f.read().strip().split('\n')
        with open(test_classes_path, 'r') as f:
            test_classes = f.read().strip().split('\n')
        if val_classes_path is not None:
            # BUG FIX: the validation list used to be appended to
            # test_classes, leaving val_classes as None.
            with open(val_classes_path, 'r') as f:
                val_classes = f.read().strip().split('\n')
        else:
            # No explicit validation file: hold out ~20% of the training
            # classes for validation.
            val_len = int(0.2 * len(train_classes) - 1)
            val_classes = train_classes[:val_len]
            train_classes = train_classes[val_len:]
        return np.array(classes), np.array(train_classes), np.array(test_classes), np.array(val_classes)

    # Backward-compatible alias for the original (misspelled) method name.
    read_cleasses = read_classes


class ImageLoader:
    """Callable loader: resolves a relative image path against a root
    directory and returns the image as an RGB PIL image."""

    def __init__(self, root):
        # Base directory that relative image paths are resolved against.
        self.root_dir = root

    def __call__(self, img):
        full_path = join(self.root_dir, img)
        loaded = Image.open(full_path)
        # convert('RGB') drops any alpha channel.
        return loaded.convert('RGB')


if __name__ == '__main__':
    # NOTE(review): 'E:datasets/...' (no slash after the colon) is a
    # drive-relative path on Windows — confirm 'E:/datasets/...' wasn't intended.
    data_dir = 'E:datasets/Animals_with_Attributes2'
    image_root = join(data_dir, 'JPEGImages')
    phase = 'train'  # NOTE(review): currently unused below
    # feat_file = join(data_dir, "res101.t") # feature file save path
    # att_splits_file = join(data_dir, "att_splits.t") # attribute-split file save path
    classes_path = join(data_dir, 'classes.txt')
    train_classes_path = join(data_dir, 'trainclasses.txt')
    test_classes_path = join(data_dir, 'testclasses.txt')
    # val_classes_path=join(data_dir,'classes.txt')
    # No val_classes_path: the extractor carves validation classes out of the train list.
    featureExtractor = FeatureExtractor(data_dir=data_dir,image_root=image_root, model='resnet101', classes_path=classes_path,
                                        train_classes_path=train_classes_path, test_classes_path=test_classes_path)
    featureExtractor.execute()
