# Example embedding file usage:
#   embedding_path = 'test_embeddings.pkl'
#   data = pickle.load(open(embedding_path, 'rb'), encoding='latin1')
# data['keys'] -> image names; data['labels'] -> image labels;
# data['embeddings'] -> image embeddings, a numpy.ndarray of shape (instances, 640)

import random
import os
import pickle

import mindspore
import numpy as np
import itertools
import mindspore.ops as ops
from mindspore import Tensor


class Data_Utils(object):
    """Few-shot episode sampler over pre-computed image embeddings.

    Loads one pickled embedding file per meta-split, of the form
    ``{'keys': ..., 'labels': ..., 'embeddings': (instances, dim) ndarray}``,
    indexes the embeddings by class, and serves batches of N-way K-shot
    episodes as L2-normalized MindSpore tensors.
    """

    def __init__(self, train, seed, way, shot,
                data_path, dataset_name, embedding_crop,
                batch_size, val_batch_size, test_batch_size,
                meta_val_steps, embedding_size, verbose):
        super(Data_Utils, self).__init__()
        self.train = train
        self.seed = seed
        self.way = way                        # N: classes per episode
        self.shot = shot                      # K: support images per class
        self.data_path = data_path
        self.dataset_name = dataset_name
        self.embedding_crop = embedding_crop  # name of the pre-training / crop scheme subfolder
        self.batch_size = batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size
        self.meta_val_steps = meta_val_steps  # query images per class
        self.embedding_size = embedding_size
        self.verbose = verbose

        # Meta-training uses the train/val splits; evaluation uses test only.
        if self.train:
            self.metasplit = ['train', 'val']
        else:
            self.metasplit = ['test']

        # Seed BOTH RNGs: class/order selection uses `random`, image
        # selection uses `np.random` (previously unseeded, so batches were
        # not reproducible despite `seed`).
        random.seed(self.seed)
        np.random.seed(self.seed)
        self.construct_data()

    def construct_data(self):
        """Load embedding pickles and index them by class and image name."""
        # Layout: <data_path>/<dataset_name>/<embedding_crop>/<split>_embeddings.pkl
        self.embedding_path = os.path.join(self.data_path, self.dataset_name, self.embedding_crop)

        self.embeddings = {}
        for d in self.metasplit:
            pkl_path = os.path.join(self.embedding_path, d + '_embeddings.pkl')
            if self.verbose:
                print('Loading data from ' + pkl_path + '...')
            # `with` guarantees the file handle is closed (it was leaked before).
            # NOTE: pickle.load must only be used on trusted files.
            with open(pkl_path, 'rb') as f:
                self.embeddings[d] = pickle.load(f, encoding='latin1')

        # Group image names by class and map each image name to its embedding.
        self.image_by_class = {}
        self.embed_by_name = {}
        self.class_list = {}
        for d in self.metasplit:
            self.image_by_class[d] = {}
            self.embed_by_name[d] = {}
            self.class_list[d] = set()
            keys = self.embeddings[d]["keys"]
            for i, k in enumerate(keys):
                # Key format: "<prefix>-<class_name>-<img_name>"
                _, class_name, img_name = k.split('-')
                if class_name not in self.image_by_class[d]:
                    self.image_by_class[d][class_name] = []
                self.image_by_class[d][class_name].append(img_name)
                # NOTE(review): keyed by bare img_name — assumes image names
                # are unique across classes within a split; confirm when
                # adding new datasets.
                self.embed_by_name[d][img_name] = self.embeddings[d]["embeddings"][i]
                self.class_list[d].add(class_name)

            # Fix a definite (list) ordering of the class set for sampling.
            self.class_list[d] = list(self.class_list[d])
            if self.verbose:
                print('Finish constructing ' + d + ' data, total %d classes.' % len(self.class_list[d]))

    def get_batch(self, metasplit):
        """Sample a batch of N-way K-shot episodes from ``metasplit``.

        Returns a dict with 'train' (support) and 'val' (query) entries;
        each holds:
          'input'  -> L2-normalized Tensor (b_size, N, num_images, embedding_size)
          'target' -> Tensor (b_size, N, num_images, 1) of episode class indices
        """
        if metasplit == 'train':
            b_size = self.batch_size
        elif metasplit == 'val':
            b_size = self.val_batch_size
        else:
            b_size = self.test_batch_size

        K = self.shot                    # support images per class
        N = self.way                     # classes per episode
        val_steps = self.meta_val_steps  # query images per class

        datasplit = ['train', 'val']
        batch = {d: {'input': [], 'target': [], 'name': []} for d in datasplit}

        for _ in range(b_size):
            # Pick N distinct classes for this episode (uniformly, same
            # distribution as shuffle-then-take-first-N).
            episode_classes = random.sample(self.class_list[metasplit], N)

            inp = {'train': [[] for _ in range(N)], 'val': [[] for _ in range(N)]}
            tgt = {'train': [[] for _ in range(N)], 'val': [[] for _ in range(N)]}

            for c, class_name in enumerate(episode_classes):
                # Draw K support + val_steps query images. replace=False so
                # the same image can never appear in both the support and
                # query sets (sampling WITH replacement previously leaked
                # support images into the query set). Requires each class to
                # have at least K + val_steps images.
                images = np.random.choice(self.image_by_class[metasplit][class_name],
                                          K + val_steps, replace=False)
                # First K images form the support set, the rest the query set.
                image_names = {'train': images[:K], 'val': images[K:]}

                for d in datasplit:
                    num_images = K if d == 'train' else val_steps
                    assert len(image_names[d]) == num_images
                    for name in image_names[d]:
                        inp[d][c].append(self.embed_by_name[metasplit][name])
                        # Label = position of the class inside this episode.
                        tgt[d][c].append(c)

            for d in datasplit:
                num_images = K if d == 'train' else val_steps
                assert len(inp[d]) == N

                # Shuffle the class order without materializing all N!
                # permutations (list(itertools.permutations(range(N)))
                # is infeasible for larger `way`). Targets keep their
                # original class indices, so labels stay consistent
                # across support/query.
                order = random.sample(range(N), N)
                inputs = [inp[d][i] for i in order]
                target = [tgt[d][i] for i in order]

                batch[d]['input'].append(np.asarray(inputs).reshape(N, num_images, -1))
                batch[d]['target'].append(np.asarray(target).reshape(N, num_images, -1))

        # Convert to MindSpore tensors; L2-normalize embeddings along the
        # feature axis.
        for d in datasplit:
            num_images = K if d == 'train' else val_steps
            raw_input = Tensor(np.array(batch[d]['input']), mindspore.float32)
            batch[d]['input'] = ops.L2Normalize(axis=-1)(raw_input)
            batch[d]['target'] = Tensor.from_numpy(np.array(batch[d]['target']))

            assert batch[d]['input'].shape == (b_size, N, num_images, self.embedding_size)
            assert batch[d]['target'].shape == (b_size, N, num_images, 1)
        return batch
