from __future__ import division

import numpy as np
import torch
import sklearn.metrics.pairwise
from .. import faissext


def calc_recall_at_k(T, Y, k):
    """Compute Recall@k: the fraction of queries whose true label appears
    among the first k retrieved labels.

    T : [nb_samples] (target labels)
    Y : [nb_samples x K] (K predicted labels/neighbours, nearest first)
    k : number of leading neighbours to consider (k <= K)

    Returns a float in [0, 1]. Raises ZeroDivisionError if T is empty.
    """
    # bool is a subclass of int, so summing the membership tests counts hits
    # directly; true division is guaranteed by the __future__ import.
    hits = sum(t in y[:k] for t, y in zip(T, Y))
    return hits / len(T)

def get_relevance_mask(shape, gt_labels, embeddings_come_from_same_source, label_counts):
    """Build a binary mask marking, per query row, which neighbour columns
    can possibly hold a relevant (same-class) match.

    shape : (num_queries, K) — shape of the knn_labels matrix.
    gt_labels : per-query ground-truth labels; compared elementwise against
        each class key (works for both (n,) and (n, 1) label arrays).
    embeddings_come_from_same_source : if True, the query itself is present
        in the reference set, so one potential match (itself) is excluded.
    label_counts : dict mapping class label -> number of reference samples
        of that class.

    Returns an int ndarray of `shape` with 1s in the first `count`
    (or `count - 1`) columns of every row belonging to that class.
    """
    # NOTE: the original used dtype=np.int, which was removed in NumPy 1.24;
    # the builtin int is the supported equivalent.
    relevance_mask = np.zeros(shape=shape, dtype=int)
    for label, count in label_counts.items():
        matching_rows = np.where(gt_labels == label)[0]
        # Exclude the query itself when it lives in the reference set.
        max_column = count - 1 if embeddings_come_from_same_source else count
        relevance_mask[matching_rows, :max_column] = 1
    return relevance_mask

# embeddings_come_from_same_source:
#   Set to True if query is a subset of reference or if query is reference. Set to False otherwise.
# average_per_class: If True, the average accuracy per class is computed, and then the average of those averages is returned.
#   This can be useful if your dataset has unbalanced classes. If False, the global average will be returned.
# gt_labels: (number_of_examples, 1)
def mean_average_precision_at_r(knn_labels, gt_labels, embeddings_come_from_same_source, label_counts, average_per_class):
    """Compute Mean Average Precision at R (MAP@R).

    knn_labels : (num_queries, K) labels of each query's K nearest neighbours.
    gt_labels : (num_queries, 1) ground-truth label per query.
    embeddings_come_from_same_source : True when queries are drawn from the
        reference set (the self-match column is then excluded via the mask).
    label_counts : dict class label -> reference count, defining R per class.
    average_per_class : if True, average per-class accuracies (macro average);
        otherwise a single global mean over all queries.
    """
    mask = get_relevance_mask(knn_labels.shape, gt_labels, embeddings_come_from_same_source, label_counts)
    _, num_cols = knn_labels.shape
    # A hit counts only inside each query's first-R window of the mask.
    hits = (knn_labels == gt_labels) * mask.astype(bool)
    # Precision at each rank where a hit occurs; ranks broadcast across rows.
    ranks = np.arange(1, num_cols + 1)[None, :]
    precision_at_hits = (np.cumsum(hits, axis=1) * hits) / ranks
    # NOTE(review): rows whose mask is all zero divide by zero here and yield
    # nan — same as the original; presumably label_counts guarantees R >= 1.
    per_query = np.sum(precision_at_hits * mask, axis=1) / np.sum(mask, axis=1)
    return maybe_get_avg_of_avgs(per_query, gt_labels, average_per_class)

def maybe_get_avg_of_avgs(accuracy_per_sample, sample_labels, avg_of_avgs):
    """Reduce per-sample accuracies to a single scalar.

    accuracy_per_sample : (n,) accuracy value for each sample.
    sample_labels : (n, 1) class label per sample (column vector, so it
        broadcasts against the row of unique labels below).
    avg_of_avgs : if True, first average within each class, then average
        those class means (robust to class imbalance); else a plain mean.
    """
    if not avg_of_avgs:
        return np.mean(accuracy_per_sample)
    classes = np.unique(sample_labels)
    # (n, C) membership matrix: column c flags the samples of class c.
    membership = sample_labels == classes[None, :]
    per_class_total = np.sum(accuracy_per_sample[:, None] * membership, axis=0)
    per_class_count = np.sum(membership, axis=0)
    return np.mean(per_class_total / per_class_count)
