import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, roc_curve, auc


def cal_cos(emd1, emd2):
    """Row-wise cosine similarity between two batches of embeddings.

    Args:
        emd1, emd2: numeric arrays of shape (n, d), paired row-wise.

    Returns:
        Array of shape (n,) with the cosine similarity of each row pair.
        A small epsilon (1e-5) in the denominator guards against division
        by zero for all-zero rows.
    """
    mult_sum = np.sum(emd1 * emd2, axis=1)
    # Bug fix: the denominator previously used norm(emd1) twice instead of
    # norm(emd1) * norm(emd2), so the result was not a true cosine.
    denom = np.linalg.norm(emd1, axis=1) * np.linalg.norm(emd2, axis=1) + 1e-5
    return mult_sum / denom

def cal_l2(emd1, emd2):
    """Negated row-wise Euclidean distance between paired embeddings.

    Negation turns the distance into a similarity score: larger is closer,
    so it can be thresholded the same way as cosine similarity.
    """
    squared_gaps = (emd1 - emd2) ** 2
    return -np.sqrt(squared_gaps.sum(axis=1))


def find_best_acc(labels, scores):
    """Find the score threshold that maximises classification accuracy.

    Each observed score is tried as a threshold; a sample is predicted
    positive when its score >= threshold. Labels are assumed to be binary
    0/1 (same contract as the previous accuracy_score-based version).

    Replaces the original O(n^2) loop (one full accuracy computation per
    candidate threshold) with an O(n log n) sort + cumulative-count scheme.
    Tie-breaking matches the original exactly: the first score in input
    order that reaches the maximum accuracy is returned.

    Args:
        labels: 1-D binary (0/1) ground-truth labels.
        scores: 1-D similarity scores, same length as labels.

    Returns:
        (best_acc, best_thresh); (0, None) when no threshold yields a
        positive accuracy (including empty input), mirroring the original.
    """
    labels = np.asarray(labels)
    scores = np.asarray(scores)
    if scores.size == 0:
        return 0, None

    order = np.argsort(scores, kind='mergesort')  # stable, for deterministic ties
    sorted_scores = scores[order]
    sorted_labels = labels[order]

    # cum_pos[k] = number of positives among the k lowest-scoring samples.
    cum_pos = np.concatenate(([0], np.cumsum(sorted_labels)))
    total_pos = cum_pos[-1]
    n = scores.size

    # For threshold t, predictions flip from 0 to 1 at the first sorted
    # index whose score >= t; searchsorted finds that index for every t.
    first_ge = np.searchsorted(sorted_scores, scores, side='left')
    # correct = TP (positives at/above threshold) + TN (negatives below it).
    correct = (total_pos - cum_pos[first_ge]) + (first_ge - cum_pos[first_ge])
    accs = correct / n

    best_idx = int(np.argmax(accs))  # first max in input order, like the loop
    best_acc = float(accs[best_idx])
    if best_acc == 0:
        # Original never set a threshold unless acc exceeded the initial 0.
        return 0, None
    return best_acc, scores[best_idx]


def calculate_general(embeddings1, embeddings2, labels, metric='cos'):
    """Score paired embeddings and derive ROC / best-accuracy statistics.

    Args:
        embeddings1, embeddings2: arrays of shape (n, d), paired row-wise.
        labels: binary ground-truth labels of length n (1 = matching pair).
        metric: 'cos' for cosine similarity, 'l2' for negated L2 distance.

    Returns:
        (fpr, tpr, thresh, auc_score, best_acc, best_thresh) where
        fpr/tpr/thresh come from sklearn's roc_curve, auc_score is the
        area under that curve, and best_acc/best_thresh maximise accuracy
        over the observed scores.

    Raises:
        ValueError: on mismatched shapes or an unknown metric.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if not (embeddings1.shape[0] == embeddings2.shape[0] == len(labels)):
        raise ValueError('embeddings1, embeddings2 and labels must have the same length')
    if embeddings1.shape[1] != embeddings2.shape[1]:
        raise ValueError('embedding dimensions must match')
    if metric not in ('cos', 'l2'):
        raise ValueError("metric must be 'cos' or 'l2'")

    if metric == 'cos':
        scores = cal_cos(embeddings1, embeddings2)
    else:
        scores = cal_l2(embeddings1, embeddings2)

    fpr, tpr, thresh = roc_curve(labels, scores)
    auc_score = auc(fpr, tpr)
    best_acc, best_thresh = find_best_acc(labels, scores)

    return fpr, tpr, thresh, auc_score, best_acc, best_thresh


def plot_roc(fpr, tpr, label):
    """Draw one ROC curve plus a chance diagonal and display the figure.

    Args:
        fpr: false-positive rates, as returned by roc_curve.
        tpr: true-positive rates, same length as fpr.
        label: legend label for this curve.
    """
    plt.plot(fpr, tpr, label=label)
    plt.plot([0, 1], [0, 1], 'g--')  # chance-level diagonal (unlabelled)
    plt.title('Receiver Operating Characteristics')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid(True)
    plt.legend()
    plt.show()
