import re
from functools import partial
from typing import List, Union, Callable

import numpy as np
from scipy.sparse import spmatrix, isspmatrix

metrics_pattern = re.compile(r'(^[a-z]+)(?:@(\d+?))?$')


def define_metric(metric_str, metrics_dict, all_rel):
    """Parse a metric alias like ``'ndcg@10'`` into a ready-to-call function.

    Args:
        metric_str: alias, optionally with an ``@k`` cutoff (e.g. ``'recall@5'``).
        metrics_dict: maps canonical names ('precision', 'recall', 'ap', 'ndcg')
            to the underlying metric implementations.
        all_rel: per-query total relevant counts, bound into metrics that need them.

    Returns:
        A ``functools.partial`` with ``k`` (and ``all_rel`` where needed) bound,
        or ``None`` when the alias is malformed or unknown.
    """
    metric_str = metric_str.lower()
    match = metrics_pattern.match(metric_str)
    if not match:
        return None
    name = match.group(1)
    k = match.group(2)
    k = int(k) if k is not None else None
    if name in {'precision', 'pre', 'pr'}:
        return partial(metrics_dict['precision'], k=k)
    if name in {'recall', 're'}:
        return partial(metrics_dict['recall'], all_rel=all_rel, k=k)
    if name in {'map', 'average precision', 'ap'}:
        return partial(metrics_dict['ap'], all_rel=all_rel, k=k)
    if name == 'ndcg':
        return partial(metrics_dict['ndcg'], all_rel=all_rel, k=k)
    # Bug fix: an unrecognized name that still matched the pattern
    # (e.g. 'foo@5') used to be returned as the raw string, which then
    # slipped past the `is not None` filter in metrics_function.
    return None


def define_metric_str(metric_str):
    """Parse a metric alias into a ``(canonical_name, k)`` pair.

    Args:
        metric_str: alias such as ``'pr@10'`` or ``'ndcg'``.

    Returns:
        ``(name, k)`` where ``name`` is one of 'precision', 'recall', 'ap',
        'ndcg' and ``k`` is an int cutoff or ``None``; ``None`` when the
        alias is malformed or unknown.
    """
    metric_str = metric_str.lower()
    match = metrics_pattern.match(metric_str)
    if not match:
        return None
    name = match.group(1)
    k = match.group(2)
    k = int(k) if k is not None else None
    aliases = {'precision': 'precision', 'pre': 'precision', 'pr': 'precision',
               'recall': 'recall', 're': 'recall',
               'map': 'ap', 'average precision': 'ap', 'ap': 'ap',
               'ndcg': 'ndcg'}
    canonical = aliases.get(name)
    # Bug fix: an unrecognized-but-matching name used to be returned as the
    # bare string instead of None (callers expect a tuple or None).
    return (canonical, k) if canonical is not None else None


"""rel represents the list of relevant items in a query
 and rel_i \\in {0, 1}"""


def precision_at_k(rel, k=None):
    """Precision@k: fraction of the top-k ranked results that are relevant.

    Args:
        rel: binary relevance list (rel_i in {0, 1}) in ranked order.
        k: cutoff; None uses the full list.

    Returns:
        float in [0, 1] (NaN, with a warning, if the truncated list is empty).
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the equivalent dtype.
    rel = np.asarray(rel, dtype=int)[:k]
    return np.mean(rel)


def recall_at_k(rel, all_rel, k=None):
    """Recall@k: fraction of all relevant items retrieved in the top k.

    Args:
        rel: binary relevance list (rel_i in {0, 1}) in ranked order.
        all_rel: total number of relevant items for the query (assumed > 0).
        k: cutoff; None uses the full list.

    Returns:
        float in [0, 1].
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the equivalent dtype.
    rel = np.asarray(rel, dtype=int)[:k]
    return np.sum(rel) / all_rel


def average_precision_at_k(rel, all_rel, k=None):
    """Average Precision@k for one query.

    Sums precision@p over each relevant rank p in the top k, then divides by
    the total number of relevant items.

    Args:
        rel: binary relevance list (rel_i in {0, 1}) in ranked order.
        all_rel: total number of relevant items for the query (assumed > 0).
        k: cutoff; None uses the full list.

    Returns:
        float in [0, 1].
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the equivalent dtype.
    rel = np.asarray(rel, dtype=int)[:k]
    # 1-based ranks of the hits; precision at the i-th hit is i / rank_i.
    rank = np.where(rel == 1)[0] + 1
    return np.sum(np.arange(1, 1 + rank.size) / rank) / all_rel


def dcg(rel):
    """Discounted cumulative gain: position i (0-based) is discounted by log2(i + 2).

    Args:
        rel: relevance list in ranked order.

    Returns:
        float DCG value (0.0 for an empty list).
    """
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the equivalent dtype.
    rel = np.asarray(rel, dtype=int)
    return np.sum(rel / np.log2(np.arange(2, 2 + rel.size)))


def ndcg_at_k(rel, all_rel, k=None):
    """NDCG@k: DCG of the ranking divided by the DCG of the ideal ranking.

    The ideal ranking places all `all_rel` relevant items first.

    Args:
        rel: binary relevance list in ranked order.
        all_rel: total number of relevant items for the query (assumed > 0;
            zero would divide by zero).
        k: cutoff; None uses the full list.

    Returns:
        float in [0, 1].
    """
    rel = np.asarray(rel)[:k]
    # `all_rel` may arrive as a NumPy float (e.g. from np.sum over a float
    # matrix); np.ones requires an integer length, so cast explicitly.
    ideal = np.ones(int(all_rel))[:k]
    return dcg(rel) / dcg(ideal)


"""matrix version"""


def mat_precision_at_k(rel, k=None):
    """Row-wise Precision@k over a matrix of binary relevance rows.

    Args:
        rel: (..., n_items) relevance matrix, rows in ranked order.
        k: cutoff applied to the last axis; None uses all columns.

    Returns:
        Array of per-row precision values.
    """
    top = rel[..., :k]
    return np.mean(top, axis=-1)


def mat_recall_at_k(rel, all_rel, k=None):
    """Row-wise Recall@k: hits in the top k divided by total relevant per row.

    Args:
        rel: (..., n_items) binary relevance matrix, rows in ranked order.
        all_rel: per-row total relevant counts (broadcastable against rows).
        k: cutoff applied to the last axis; None uses all columns.

    Returns:
        Array of per-row recall values.
    """
    hits = np.sum(rel[..., :k], axis=-1)
    return hits / all_rel


def mat_average_precision_at_k(rel, all_rel, k=None):
    """Row-wise Average Precision@k.

    Vectorized: replaces the Python-level ``np.apply_along_axis`` loop with a
    cumulative hit count. At each relevant 1-based position p the precision
    contribution is (#hits up to p) / p — identical to the per-row
    ``np.where(r == 1)`` formulation it supersedes.

    Args:
        rel: (..., n_items) binary relevance matrix, rows in ranked order.
        all_rel: per-row total relevant counts (broadcastable; assumed > 0).
        k: cutoff applied to the last axis; None uses all columns.

    Returns:
        Array of per-row AP values.
    """
    rel = rel[..., :k]
    hits = (rel == 1)
    ranks = np.arange(1, rel.shape[-1] + 1)
    cum_hits = np.cumsum(hits, axis=-1)
    precision_sum = np.sum(np.where(hits, cum_hits / ranks, 0.0), axis=-1)
    return precision_sum / all_rel


def mat_dcg(rel):
    """Row-wise DCG: column i (0-based) is discounted by log2(i + 2).

    Args:
        rel: (..., n_items) relevance matrix, rows in ranked order.

    Returns:
        Array of per-row DCG values.
    """
    discounts = np.log2(np.arange(2, 2 + rel.shape[-1]))
    return np.sum(rel / discounts, axis=-1)


def mat_ndcg_at_k(rel, all_rel, k=None):
    """Row-wise NDCG@k: per-row DCG divided by the DCG of that row's ideal ranking.

    Args:
        rel: (n_queries, n_items) binary relevance matrix, rows in ranked order.
        all_rel: length-n_queries vector of total relevant counts per row.
        k: cutoff applied to the last axis; None uses all columns.

    Returns:
        Array of per-row NDCG values.

    NOTE(review): a row with all_rel == 0 has an ideal DCG of 0 and divides
    by zero — presumably callers guarantee every row has at least one
    relevant item; confirm against callers.
    """
    all_rel = all_rel[:, np.newaxis]
    m = np.max(all_rel)
    rel = rel[..., :k]
    # Ideal ranking per row: all_rel[i] ones followed by zeros. Broadcasting
    # replaces the per-row np.apply_along_axis construction (and also works
    # when all_rel has a float dtype, where np.zeros(m) would raise on
    # modern NumPy).
    ideal_rel = (np.arange(m) < all_rel)[..., :k]
    return mat_dcg(rel) / mat_dcg(ideal_rel)


def calculate_relevant_mat(train_mat, test_mat, rating_mat):
    """Reorder each row of ``test_mat`` by descending predicted rating.

    Items already seen in training are scored -inf so they sink to the end
    of the ranking and never count as retrieved.

    Args:
        train_mat: (n_users, n_items) training interaction matrix (nonzero = seen).
        test_mat: (n_users, n_items) held-out relevance matrix.
        rating_mat: (n_users, n_items) predicted scores.

    Returns:
        test_mat with each row permuted into predicted-rating order.
    """
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the equivalent dtype.
    masked = np.where(train_mat.astype(bool), -np.inf, rating_mat)
    order = masked.argsort(axis=1)[:, ::-1]
    return np.take_along_axis(test_mat, order, axis=1)


def metrics_function(train_mat: Union[np.ndarray, spmatrix],
                     test_mat: Union[np.ndarray, spmatrix],
                     metrics: Union[List[str], str],
                     mean=True) -> Callable:
    """Build a closure that evaluates ranking metrics for a rating matrix.

    Args:
        train_mat: (n_users, n_items) training interactions; dense or sparse.
        test_mat: (n_users, n_items) held-out relevance matrix; dense or sparse.
        metrics: one alias or a list of aliases (e.g. 'ndcg@10', 'recall@20');
            unresolvable aliases are silently dropped.
        mean: if True the returned dict holds scalar means over users,
            otherwise per-user arrays.

    Returns:
        ``metrics_wrapper(rating) -> dict`` mapping each alias to its value.
    """
    if isspmatrix(train_mat):
        train_mat = train_mat.toarray()
    if isspmatrix(test_mat):
        test_mat = test_mat.toarray()

    # Accept a single alias as well as a list (isinstance, not `type(...) is`).
    if isinstance(metrics, str):
        metrics = [metrics]

    all_rel = np.sum(test_mat, axis=-1)
    metrics_dict = {'precision': mat_precision_at_k,
                    'recall': mat_recall_at_k,
                    'ap': mat_average_precision_at_k,
                    'ndcg': mat_ndcg_at_k}

    # Resolve each alias once up front; drop those define_metric rejects.
    metrics_list = [(alias, fn) for alias, fn in
                    ((s, define_metric(s, metrics_dict, all_rel)) for s in metrics)
                    if fn is not None]

    def metrics_wrapper(rating):
        # A leading batch axis of size 1 (e.g. a model's batched output) is
        # squeezed off — presumably callers never pass batch size > 1; confirm.
        if len(rating.shape) == 3:
            rating = rating[0]
        rel_mat = calculate_relevant_mat(train_mat, test_mat, rating)
        result_metrics_dict = {name: fn(rel_mat) for name, fn in metrics_list}
        if mean:
            result_metrics_dict = {name: np.mean(value)
                                   for name, value in result_metrics_dict.items()}
        return result_metrics_dict

    return metrics_wrapper


def is_metric(alias):
    """Return True when *alias* is a recognized metric name, optionally
    carrying an '@k' suffix (e.g. 'recall@20'); False otherwise."""
    known = {'precision', 'pre', 'pr', 'recall', 're',
             'map', 'average precision', 'ap', 'ndcg'}
    match = re.match(r'(^[a-z]+)(?:@(\d+?))?$', alias)
    if not match:
        return False
    return match.group(1) in known
