import functools
import time

import torch

from core.utils import logging

def get_time(name: str, is_need_log: bool):
    """Decorator factory that measures the wrapped function's wall-clock runtime.

    :param name: label under which the timing is logged or accumulated
    :param is_need_log: if True, log the elapsed time; if False, accumulate the
        elapsed time (averaged over ``runner.log_cfg['interval']``) into
        ``runner.metric[name]``, where ``runner`` is the first positional
        argument of the wrapped function.
    :return: a decorator that wraps a function with the timing logic
    """
    def decorator(func):
        # functools.wraps preserves the wrapped function's __name__/__doc__,
        # which the original bare wrapper clobbered.
        @functools.wraps(func)
        def wrapper(*args, **kw):
            # perf_counter is monotonic and high-resolution; time.time() can
            # jump backwards on system clock adjustments.
            start_time = time.perf_counter()
            result = func(*args, **kw)
            elapsed = time.perf_counter() - start_time
            if is_need_log:
                logging.info(f'{name}\'s time is {elapsed}')
            else:
                # Accumulate a per-interval average on the runner's metric dict.
                # NOTE(review): assumes args[0] is the runner and that
                # log_cfg contains a non-zero 'interval' — confirm with callers.
                runner = args[0]
                runner.metric[name] = runner.metric.get(name, 0) + elapsed / runner.log_cfg.get('interval')
            return result
        return wrapper
    return decorator

def gold_labels(span_indices, spans, span_labels):
    """Look up the gold label for every predicted span.

    :param span_indices: predicted span indices, one list per batch item
    :param spans: gold span indices, one list per batch item
    :param span_labels: gold labels aligned element-wise with ``spans``
    :return: ``(gold_indices, gold_labels)`` — ``gold_indices`` mirrors
        ``span_indices`` exactly; ``gold_labels`` holds, for each predicted
        span, the matching gold label, or 0 when the span is not a gold span.
    """
    # NOTE: by construction the returned indices are identical to the input
    # span_indices; they are returned alongside the labels for convenience.
    all_indices, all_labels = [], []
    for predicted, gold_spans, labels in zip(span_indices, spans, span_labels):
        all_indices.append(list(predicted))
        all_labels.append([
            labels[gold_spans.index(span)] if span in gold_spans else 0
            for span in predicted
        ])
    return all_indices, all_labels

def log_likelihood(probability, indices, gold_indices, gold_labels):
    """Sum of negative log-likelihoods of the gold labels over kept candidates.

    The training objective is the sum of the negative log-likelihood from both
    the mention module and the triplet module: for each gold span (or span
    pair) that survived pruning, we take -log P(gold label) at its position in
    the candidate list.

    :param probability: tensor of shape
        (batch_size, span_len/relation_len, span_label_len/relation_label_len)
    :param indices: predicted span or candidate indices,
        List[List[Tuple(i, j)]] or List[List[Tuple(a, b, c, d)]]
    :param gold_indices: gold span/pair indices in the same format as ``indices``
    :param gold_labels: gold label ids aligned with ``gold_indices``
    :return: scalar tensor — the summed negative log-likelihood (0 when no
        gold span/pair is present among the candidates)
    """
    # Map each gold span/pair to its position in this batch item's candidate
    # list so we can index into `probability`.
    gold_indice_labels = []
    for batch_idx, batch_gold in enumerate(gold_indices):
        candidates = indices[batch_idx]  # hoist: avoid re-indexing per span
        for i, span in enumerate(batch_gold):
            if span in candidates:
                idx = candidates.index(span)
                gold_indice_labels.append((batch_idx, idx, gold_labels[batch_idx][i]))

    # Fix: torch.stack([]) raises RuntimeError. When no gold span survived
    # pruning, return a zero loss with the probability tensor's dtype/device.
    if not gold_indice_labels:
        return probability.new_zeros(())

    eps = 1e-5  # numerical guard against log(0)
    loss = [-torch.log(probability[b, i, lab] + eps) for b, i, lab in gold_indice_labels]
    return torch.stack(loss).sum()

def metrics(probability, labels):
    """Compute precision, recall and F1 over non-zero predictions.

    Class 0 is treated as the "null" class: predictions of class 0 are not
    counted as proposals and labels of class 0 are not counted as gold.

    :param probability: scores whose argmax over the last dim is the predicted class
    :param labels: gold class ids, same leading shape as ``probability``
    :return: (precision, recall, f1)
    """
    # epsilon keeps every division finite when a count is zero.
    eps = 1e-6
    predicted = probability.argmax(-1)
    proposed_mask = predicted != 0

    num_correct = torch.logical_and(labels == predicted, proposed_mask).sum().item()
    num_proposed = proposed_mask.sum().item()
    num_gold = (labels != 0).sum().item()

    precision = num_correct / (num_proposed + eps)
    recall = num_correct / (num_gold + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision, recall, f1
