import os
import pickle
import mindspore as ms
from mindspore import Tensor
import mindspore.dataset as ds
from .data_utils import get_rank_info, get_num_parallel_workers
import numpy as np


def get_trans(train_image_size, train=True):
    """Build the image transform pipeline and the label type-cast op.

    Args:
        train_image_size: target side length for the Resize step.
        train: when True, prepend random-crop / random-flip augmentation.

    Returns:
        (list of image transform ops, TypeCast op casting labels to int32)
    """
    vision = ds.vision.c_transforms

    augment = [
        vision.RandomCrop((32, 32), (4, 4, 4, 4)),
        vision.RandomHorizontalFlip(prob=0.5),
    ] if train else []

    common = [
        vision.Resize((train_image_size, train_image_size)),
        vision.Rescale(1.0 / 255.0, 0.0),
        vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        vision.HWC2CHW(),
    ]

    return augment + common, ds.transforms.c_transforms.TypeCast(ms.int32)


class CIFAR100(object):
    """In-memory CIFAR-100 reader for the pickled archive layout.

    Loads the ``train`` or ``test`` pickle file found under ``root`` and
    exposes (image, label) pairs; images are uint8 arrays in HWC layout,
    shape (32, 32, 3).
    """

    train_list = [
        'train',
    ]

    test_list = [
        'test',
    ]

    def __init__(self, root, train):
        super(CIFAR100, self).__init__()

        self.root = root
        self.train = train  # True -> training split, False -> test split

        file_names = self.train_list if self.train else self.test_list

        batches = []
        self.targets = []
        # load the pickled numpy arrays from disk
        for name in file_names:
            with open(os.path.join(self.root, name), 'rb') as fh:
                entry = pickle.load(fh, encoding='latin1')
            batches.append(entry['data'])
            # coarse archives use 'labels'; the standard CIFAR-100 file
            # stores the 100-class labels under 'fine_labels'
            label_key = 'labels' if 'labels' in entry else 'fine_labels'
            self.targets.extend(entry[label_key])

        # (N, 3072) -> (N, 3, 32, 32) -> (N, 32, 32, 3), i.e. CHW -> HWC
        self.data = np.vstack(batches).reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))

    def __getitem__(self, index):
        """Return the (image, label) pair stored at ``index``."""
        return self.data[index], self.targets[index]

    def __len__(self):
        """Number of samples in the loaded split."""
        return len(self.data)

def create_dataset(dataset_path, train, batch_size=32, train_image_size=224, distribute=True):
    """Create a batched, optionally sharded CIFAR-100 pipeline.

    Args:
        dataset_path: directory holding the CIFAR-100 pickle files.
        train: build the training split (with augmentation) when True.
        batch_size: samples per batch; incomplete batches are dropped.
        train_image_size: side length images are resized to.
        distribute: when True, shard across devices per get_rank_info.

    Returns:
        (dataset, number of samples consumed per epoch after batching)
    """
    shard_count, shard_index = get_rank_info(distribute)
    image_ops, label_cast = get_trans(train_image_size, train)
    workers = get_num_parallel_workers(8)

    source = CIFAR100(root=dataset_path, train=train)

    dataset = ds.GeneratorDataset(
        source=source,
        column_names=["image", "label"],
        num_shards=shard_count,
        shard_id=shard_index,
        shuffle=True,
    )
    dataset = dataset.map(operations=label_cast, input_columns="label", num_parallel_workers=workers)
    dataset = dataset.map(operations=image_ops, input_columns="image", num_parallel_workers=workers)
    dataset = dataset.batch(batch_size, drop_remainder=True)

    return dataset, dataset.get_dataset_size() * batch_size


class CIFAR100Instance(CIFAR100):
    """CIFAR-100 variant that also yields contrastive negative indices.

    Each sample is (index, image, label, neg) where ``neg`` is the sample's
    own index followed by ``k`` indices drawn from other classes. The
    negative pools start empty; callers populate them with one pass of
    ``update_pos`` followed by ``update_neg``. Until then, ``neg`` is just
    the sample index itself.
    """

    def __init__(self, root, train, k):
        super(CIFAR100Instance, self).__init__(root, train)
        self.num_classes = 100
        self.k = k  # number of negatives to sample per item
        # per-class index pools, filled via update_pos / update_neg
        self.cls_positive = [[] for _ in range(self.num_classes)]
        self.cls_negative = [[] for _ in range(self.num_classes)]
        sample_count = len(self.data)
        self.vis = [0] * sample_count       # 1 once a negative draw is cached
        self.cache = [None] * sample_count  # cached neg array per sample

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        if not len(self.cls_negative[target]):
            # pools not built yet: echo the index as a placeholder
            return index, img, target, index
        if not self.vis[index]:
            pool = self.cls_negative[target]
            # sample with replacement only when the pool is too small
            draw = np.random.choice(pool, self.k, replace=self.k > len(pool))
            combined = np.hstack((np.asarray([index]), draw))
            self.cache[index] = combined.astype(np.float32)
            self.vis[index] = 1
        return index, img, target, self.cache[index]

    def update_pos(self, y, ind):
        """Record sample indices ``ind`` under their labels ``y``."""
        for i in range(ind.shape[0]):
            self.cls_positive[y[i]].append(ind[i])

    def update_neg(self):
        """Build each class's negative pool from every other class's positives."""
        for cls in range(self.num_classes):
            for other in range(self.num_classes):
                if other != cls:
                    self.cls_negative[cls].extend(self.cls_positive[other])
        self.cls_negative = [np.asarray(pool) for pool in self.cls_negative]


def create_crd_dataset(dataset_path, opt, train=True, batch_size=32, train_image_size=224, distribute=True):
    """Create the CRD training pipeline with contrastive negatives.

    Runs one priming pass over the data so the source dataset can build
    its per-class positive pools, then derives the negative pools before
    returning the (reusable) pipeline.

    Args:
        dataset_path: directory holding the CIFAR-100 pickle files.
        opt: options object; only ``opt.k`` (negatives per sample) is read.
        train: build the training split (with augmentation) when True.
        batch_size: samples per batch; incomplete batches are dropped.
        train_image_size: side length images are resized to.
        distribute: when True, shard across devices per get_rank_info.

    Returns:
        (dataset, number of samples consumed per epoch after batching)
    """
    shard_count, shard_index = get_rank_info(distribute)
    image_ops, label_cast = get_trans(train_image_size, train)
    workers = get_num_parallel_workers(8)

    source = CIFAR100Instance(root=dataset_path, train=train, k=opt.k)

    dataset = ds.GeneratorDataset(
        source=source,
        column_names=["idx", "image", "label", "neg"],
        num_shards=shard_count,
        shard_id=shard_index,
        shuffle=True,
    )
    dataset = dataset.map(operations=label_cast, input_columns="idx", num_parallel_workers=workers)
    dataset = dataset.map(operations=label_cast, input_columns="label", num_parallel_workers=workers)
    dataset = dataset.map(operations=image_ops, input_columns="image", num_parallel_workers=workers)
    dataset = dataset.batch(batch_size, drop_remainder=True)

    # priming pass: record which sample indices belong to which class
    for ind, _, y, _ in dataset.create_tuple_iterator():
        source.update_pos(y, ind)
    source.update_neg()

    return dataset, dataset.get_dataset_size() * batch_size
