from __future__ import print_function
from __future__ import division

import torch
import logging
import numpy as np
import sklearn.cluster
from . import evaluation
from . import faissext
from . import data
from . import utils
from collections import defaultdict

# Module-wide toggle: when True, clustering operates on per-class centroid
# vectors (and meta-classes are padded to equal size); when False, clustering
# operates directly on individual sample embeddings.
cluster_by_class = False

def __get_cluster_labels(model, data_loader, use_penultimate, nb_clusters,
       gpu_id=None, backend='faiss'):
    """Embed the dataset with `model` and assign every sample a cluster label.

    Args:
        model: embedding network, forwarded to `utils.predict_batchwise`.
        data_loader: loader over the samples to embed and cluster.
        use_penultimate: if True, embed with the penultimate layer instead of
            the final one.
        nb_clusters: number of clusters to produce; `nb_clusters == 1` is a
            dry run (no embedding pass, every sample gets cluster 0).
        gpu_id: unused; kept for backward compatibility with callers.
        backend: one of 'torch+sklearn', 'const', 'faiss', 'faiss-gpu'
            ('random' raises NotImplementedError).

    Returns:
        (C, T_all, I_all): cluster labels, class labels and sample indices,
        element-wise aligned and sorted by sample index.
    """
    is_dry_run = (nb_clusters == 1)
    if is_dry_run:
        # No clustering needed: read labels/indices straight from the dataset
        # and put everything into a single cluster 0.
        T_all = np.array(data_loader.dataset.ys)
        I_all = np.array(data_loader.dataset.I)
        return np.zeros(len(T_all), dtype=int), T_all, I_all

    if not use_penultimate:
        logging.debug('Using the final layer for clustering')
    X_all, T_all, I_all = utils.predict_batchwise(
        model=model,
        dataloader=data_loader,
        use_penultimate=use_penultimate,
        is_dry_run=False
    )
    # Sort embeddings/labels by sample index so downstream bookkeeping
    # (reassignment, sub-index generation) works on aligned arrays.
    perm = np.argsort(I_all)
    X_all = X_all[perm]
    I_all = I_all[perm]
    T_all = T_all[perm]

    if cluster_by_class:
        # border maps class label -> [start, end) slice of the sorted arrays.
        # Relies on T_all being grouped by class after the sort above.
        border = defaultdict(tuple)
        si, sc = 0, T_all[0]
        for i, c in enumerate(T_all):
            if sc != c:
                border[sc] = (si, i)
                si, sc = i, c
        border[sc] = (si, T_all.shape[0])
        # One centroid (mean embedding) per class, in border insertion order.
        cc = [np.mean(X_all[s:e], axis=0) for s, e in border.values()]
        CX_all = np.array(cc)

    if backend == 'torch+sklearn':
        clustering_algorithm = sklearn.cluster.KMeans(
            n_clusters=nb_clusters)
        C = clustering_algorithm.fit(X_all).labels_
    elif backend == 'random':
        raise NotImplementedError
    elif backend == 'const':
        # Deterministic split: meta-class = class label // (classes per cluster).
        div, mod = divmod(len(np.unique(T_all)), nb_clusters)
        C = T_all // div
    elif cluster_by_class:
        # Cluster the per-class centroid vectors instead of raw samples.
        C = faissext.do_clustering(
            CX_all,
            num_clusters=nb_clusters,
            gpu_ids=None if backend != 'faiss-gpu'
                else torch.cuda.current_device(),
            niter=100,
            nredo=5,
            verbose=0
        )
        assert len(np.unique(C)) == nb_clusters
        # Broadcast each class-level cluster label back to that class's
        # samples.  BUGFIX: C is indexed by the *position* of the class in
        # `border` (the order centroids were stacked into CX_all), not by the
        # class label itself — the two only coincide when labels happen to be
        # the contiguous range 0..K-1.
        CC = []
        for pos, (s, e) in enumerate(border.values()):
            CC.extend([C[pos]] * (e - s))
        C = np.array(CC)
    else:
        C = faissext.do_clustering(
            X_all,
            num_clusters=nb_clusters,
            gpu_ids=None if backend != 'faiss-gpu'
                else torch.cuda.current_device(),
            niter=100,
            nredo=5,
            verbose=0
        )
    return C, T_all, I_all

def cluster_by_cap(cap):
    """Greedily distribute C classes over L learners.

    For each learner (row of `cap`, processed in order) the classes are
    ranked first by how many times they have already been handed out, then by
    the learner's capacity value for that class (both ascending); the learner
    takes the ceil(C / L) best-ranked classes.

    Args:
        cap: array of shape (L, C) with a capacity score per
            (learner, class) pair.

    Returns:
        np.ndarray of shape (L, ceil(C / L)) with the class ids picked for
        each learner.
    """
    n_learners, n_classes = cap.shape
    # Every learner receives ceil(n_classes / n_learners) classes.
    quota = -(-n_classes // n_learners)
    times_assigned = defaultdict(int)

    assignments = []
    for li in range(n_learners):
        # Rank all classes by (global usage so far, this learner's capacity),
        # both ascending — equivalent to draining a priority queue built from
        # a snapshot of the current usage counts.
        ranked = sorted(
            range(n_classes),
            key=lambda cj: (times_assigned[cj], cap[li][cj])
        )
        chosen = ranked[:quota]
        for cj in chosen:
            times_assigned[cj] += 1
        assignments.append(chosen)

    return np.array(assignments)

def __gen_sub_indices(config, T, C, I):
    """
    T: class labels
    C: meta class labels
    I: indices
    return: sample indices for each data subset
    """
    nb_clusters = config['nb_clusters']

    if not cluster_by_class:
        # Plain case: one subset per meta-class label.
        return [I[C == c] for c in range(nb_clusters)]

    # meta_classes[k] holds the class labels whose samples received
    # meta-class k.
    meta_classes = [np.unique(T[C == k]) for k in range(nb_clusters)]

    # The meta-classes are generally unbalanced: pad the smaller ones with
    # randomly drawn (not yet contained) classes until all hold the same
    # number of classes.
    target_size = max(mc.size for mc in meta_classes)
    all_classes = np.unique(T)
    padded = []
    for mc in meta_classes:
        while mc.size < target_size:
            candidate = np.random.choice(all_classes, 1)
            if candidate not in mc:
                mc = np.append(mc, candidate)
        padded.append(mc)

    # Turn each (padded) class set into the indices of its member samples.
    subs = []
    for mc in padded:
        mask = np.zeros(T.shape, dtype=bool)
        for cls in mc:
            mask = np.logical_or(mask, T == cls)
        subs.append(I[mask])
    return subs


def make_clustered_dataloaders(model, dataloader_init, config,
        reassign = False, I_prev = None, C_prev = None, logging = None):
    """Cluster the dataset with `model` and build one dataloader per cluster.

    Args:
        model: embedding network, passed to clustering and loader helpers.
        dataloader_init: loader over the full dataset used for embedding.
        config: dict; reads 'nb_clusters' and 'backend'.
        reassign: if True, relabel the fresh clusters so they best match the
            previous assignment (I_prev / C_prev must then be provided).
        I_prev: sample indices from the previous clustering round.
        C_prev: cluster labels from the previous clustering round.
        logging: logger used on the reassign path.  NOTE(review): this
            parameter shadows the module-level `logging` import inside the
            function body.

    Returns:
        (dls, C, T, I): the per-cluster dataloaders plus the cluster labels,
        class labels and sample indices from this round.
    """

    def correct_indices(I):
        # Argsort of I: the positions that would sort the sample indices.
        return torch.sort(torch.LongTensor(I))[1]

    C, T, I = __get_cluster_labels(model, dataloader_init, use_penultimate=True,
                                   nb_clusters=config['nb_clusters'], backend=config['backend'])
    # NOTE: C/T/I.size = 8054,  T.tolist() = [0,0,0,...,97,97,97], I.tolist() = [0,1,2,...,8053]

    if reassign:

        # get correct indices for samples by sorting them and return arg sort
        I_correct = correct_indices(I)
        # NOTE(review): this assert only holds when I is the identity
        # permutation 0..n-1 (so that argsort(I) == I) — confirm that the
        # clustering step always returns indices in that form.
        assert I_correct.tolist() == I.tolist()
        I = I[I_correct]
        T = T[I_correct]
        C = C[I_correct]

        # also use the same indices of sorted samples for previous data
        I_prev_correct = correct_indices(I_prev)
        I_prev = I_prev[I_prev_correct]
        C_prev = C_prev[I_prev_correct]

        logging.debug('Reassigning clusters...')
        logging.debug('Calculating NMI for consecutive cluster assignments...')
        logging.debug(str(
            evaluation.calc_normalized_mutual_information(C[I], C_prev[I_prev])))

        # assign s.t. least costs w.r.t. L1 norm
        C, costs = data.loader.reassign_clusters(C_prev = C_prev,
                C_curr = C, I_prev = I_prev, I_curr = I)
        logging.debug('Costs before reassignment')
        logging.debug(str(costs))
        # Second call is intentional: run with the already-reassigned labels
        # purely to log the post-reassignment cost matrix.
        _, costs = data.loader.reassign_clusters(C_prev = C_prev,
                C_curr = C, I_prev = I_prev, I_curr = I)
        # after printing out the costs now, the trace of matrix should
        # have lower numbers than other entries in matrix
        logging.debug('Costs after reassignment')
        logging.debug(str(costs))

    #  remove labels s.t. minimum 2 samples per class per cluster
    for c in range(config['nb_clusters']):
        for t in np.unique(T[C == c]):
            if (T[C == c] == t).sum().item() == 1:
                # assign to cluster -1 if only one sample from class
                C[(T == t) & (C == c)] = -1

    if config['nb_clusters'] > 1:
        subs = __gen_sub_indices(config, T, C, I)
    else:
        subs = [I]

    dls = data.loader.make_from_clusters(
        sub_indices=subs, model=model, config=config
    )

    return dls, C, T, I
