from __future__ import print_function
from __future__ import division

from . import evaluation
from . import similarity
import numpy as np
import torch
import logging
from collections import defaultdict
from tqdm import tqdm


def predict_batchwise(model, dataloader, use_penultimate, is_dry_run=False):
    """Run the model over a whole dataloader and collect outputs.

    assert dataloader.dataset[0] is tuple of (image, label, index)

    :param model: network called as ``model(X, use_penultimate)``; switched
        to eval mode for the duration and restored afterwards
    :param dataloader: yields batches of (images, labels, indices)
    :param use_penultimate: forwarded to the model (selects embedding layer)
    :param is_dry_run: if True, skip the forward pass and return None
        in place of the features (only labels/indices are gathered)
    :returns: [F, T, I] — F: (len(dataset), d) float32 embeddings (or None
        when is_dry_run), T: labels, I: dataset indices
    """
    model_is_training = model.training
    model.eval()
    ds = dataloader.dataset
    # Accumulate per-batch arrays and concatenate once at the end;
    # calling np.append per batch copies the whole buffer each time (O(n^2)).
    features = []
    targets = []
    indices = []
    with torch.no_grad():

        # use tqdm when the dataset is large (SOProducts)
        is_verbose = len(dataloader.dataset) > 0

        for batch in tqdm(dataloader, desc='predict', disable=not is_verbose):
            # batch: [sz_batch * images(3x224x224), sz_batch * labels, sz_batch * indices]
            X = batch[0].cuda(non_blocking=True)
            targets.append(batch[1].data.cpu().numpy())
            indices.append(batch[2].data.cpu().numpy())

            if not is_dry_run:
                # predict model output for image
                features.append(model(X, use_penultimate).data.cpu().numpy())

    model.train(model_is_training)  # revert to previous training state

    # Empty-loader fallbacks mirror the original seed arrays/dtypes.
    T = np.concatenate(targets) if targets else np.array([], dtype=np.int32)
    I = np.concatenate(indices) if indices else np.array([], dtype=np.int32)
    if is_dry_run:
        # do not return features if is_dry_run
        return [None, T, I]
    # Flatten each batch output before joining — identical to the previous
    # np.append behavior — then reshape per-sample w.r.t. the dataset size.
    F = (np.concatenate([f.ravel() for f in features])
         if features else np.array([], dtype=np.float32))
    F = F.reshape(len(ds), -1)
    return [F, T, I]


def evaluate_in_shop(model, dl_query, dl_gallery, use_penultimate, backend,
        K = [1, 10, 20, 30, 50], with_nmi = True):
    """Evaluate retrieval on the In-Shop split (separate query/gallery sets).

    :param model: embedding network, passed through to predict_batchwise
    :param dl_query: dataloader over the query split
    :param dl_gallery: dataloader over the gallery split
    :param use_penultimate: which embedding layer to use (see predict_batchwise)
    :param backend: backend name forwarded to similarity helpers
    :param K: recall@k cutoffs to report
    :param with_nmi: if True, additionally run kmeans + NMI on all embeddings
    :returns: dict with 'recall' (list aligned with K), 'mAP@R', and
        optionally 'nmi'
    """
    # calculate embeddings with model and get targets
    X_query, T_query, _ = predict_batchwise(model, dl_query, use_penultimate)
    X_gallery, T_gallery, _ = predict_batchwise(model, dl_gallery, use_penultimate)

    nb_classes = dl_query.dataset.nb_classes()
    assert nb_classes == len(set(T_query))

    # gallery images per label; maxK bounds how many neighbors mAP@R inspects
    label_counts = defaultdict(lambda : 0)
    for y in dl_gallery.dataset.ys:
        label_counts[y] += 1
    maxK = max(label_counts.values())
    # calculate full similarity matrix, choose only first `len(X_query)` rows
    # and only last columns corresponding to the column
    T_eval = torch.cat([torch.from_numpy(T_query), torch.from_numpy(T_gallery)])
    X_eval = torch.cat([torch.from_numpy(X_query), torch.from_numpy(X_gallery)])
    D = similarity.pairwise_distance(X_eval)[:len(X_query), len(X_query):]

    D = torch.from_numpy(D)
    # get top k labels with smallest (`largest = False`) distance
    Y = T_gallery[D.topk(k = max(maxK, max(K)), dim = 1, largest = False)[1]]

    scores = {}

    recall = []
    for k in K:
        r_at_k = evaluation.calc_recall_at_k(T_query, Y, k)
        recall.append(r_at_k)
        logging.info("R@{} : {:.3f}".format(k, 100 * r_at_k))

    scores['recall'] = recall

    mAP_at_R = evaluation.mean_average_precision_at_r(Y, T_query.reshape(-1, 1), False, label_counts, average_per_class=False)
    logging.info("mAP@R: {:.3f}".format(100 * mAP_at_R))
    # store mAP@R like evaluate() does (it was previously computed but dropped)
    scores['mAP@R'] = mAP_at_R

    # default so __saveknn below does not raise NameError when with_nmi=False
    labels_pred = None
    if with_nmi:
        # calculate NMI with kmeans clustering
        labels_pred = similarity.cluster_by_kmeans(X_eval.numpy(), nb_classes, backend=backend)
        nmi = evaluation.calc_normalized_mutual_information(T_eval.numpy(), labels_pred)
        logging.info("NMI: {:.3f}".format(nmi * 100))
        scores['nmi'] = nmi

    __saveknn({'knny':Y, 'knnd':D, 'targets':T_query, 'labels_pred':labels_pred, 'labels_true':T_eval.numpy()})

    return scores

# {'knny':knny, 'knni':knni, 'knnd':knnd, 'targets':T}
def __saveknn(args):
    """Save knn results to shelve file.

    :args: dict of arrays to persist under their keys, typically
        knny: (number of query, K), K nearest neighbor y labels for each query
        knni: (number of query, K), K nearest neighbor indices for each query
        knnd: (number of query, K), K nearest neighbor distances for each query
    :returns: void

    """
    # isinstance also accepts dict subclasses (e.g. OrderedDict), unlike the
    # previous `type(args).__name__ == 'dict'` string comparison
    assert isinstance(args, dict)
    import shelve
    import os
    from lib.config import _global_cfg as cfg
    # save metrics etc. to shelve file (*.dat); entries are only assigned,
    # never mutated in place, so writeback caching is unnecessary
    path = os.path.join(cfg.log.path, f'knnsave@{cfg.e}')
    with shelve.open(str(path)) as f:
        for k, v in args.items():
            f[k] = v

    logging.info("knn results saved")

def evaluate(model, dataloader, use_penultimate, backend, K=[1, 2, 4, 8], with_nmi=True):
    """Evaluate retrieval on a single split (queries are also the gallery).

    :param model: embedding network, passed through to predict_batchwise
    :param dataloader: dataloader over the evaluation split
    :param use_penultimate: which embedding layer to use (see predict_batchwise)
    :param backend: backend name forwarded to similarity helpers
    :param K: recall@k cutoffs to report
    :param with_nmi: if True, additionally run kmeans + NMI on the embeddings
    :returns: dict with 'recall' (list aligned with K), 'mAP@R', and
        optionally 'nmi'
    """
    nb_classes = dataloader.dataset.nb_classes()

    # calculate embeddings X with model and get targets T
    X, T, _ = predict_batchwise(model, dataloader, use_penultimate)

    scores = {}

    # default so __saveknn below does not raise NameError when with_nmi=False
    labels_pred = None
    # NOTE: For SOP, NMI is useless:  dataloader.dataset.__class__.__name__ != 'SOProducts'
    # calculate NMI with kmeans clustering
    if with_nmi:
        labels_pred = similarity.cluster_by_kmeans(X, nb_classes, backend=backend)
        nmi = evaluation.calc_normalized_mutual_information(T, labels_pred)
        logging.info("NMI: {:.3f}".format(nmi * 100))
        scores['nmi'] = nmi

    # images per label; maxK bounds how many neighbors mAP@R inspects
    label_counts = defaultdict(lambda : 0)
    for y in dataloader.dataset.ys:
        label_counts[y] += 1
    maxK = max(label_counts.values())
    # get predictions by assigning nearest 8 neighbors with euclidian
    Y, knni, knnd = similarity.assign_by_euclidian_at_k(X, T, max(maxK, max(K)), backend=backend)
    __saveknn({'knny':Y, 'knni':knni, 'knnd':knnd, 'targets':T, 'labels_pred':labels_pred, 'labels_true':T})

    # calculate recall@k
    recall = []
    for k in K:
        r_at_k = evaluation.calc_recall_at_k(T, Y, k)
        recall.append(r_at_k)
        logging.info("R@{}: {:.3f}".format(k, 100 * r_at_k))

    scores['recall'] = recall

    mAP_at_R = evaluation.mean_average_precision_at_r(Y, T.reshape(-1, 1), True, label_counts, average_per_class=False)
    logging.info("mAP@R: {:.3f}".format(100 * mAP_at_R))

    scores['mAP@R'] = mAP_at_R

    return scores
