from argparse import ArgumentParser
import soundfile as sf
import torch
import torch.nn.functional as F
import numpy as np
import torch
import math
from tqdm import tqdm
from scipy.io import wavfile
from sklearn.metrics import roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from module.dataset import Evaluation_Dataset


def load_audio(path, snr=25):
    """Read a wav file and return it as a ``(1, samples)`` float32 tensor.

    The signal is truncated to its first 40 seconds (``sample_rate * 40``
    samples) before conversion.

    NOTE(review): ``snr`` is accepted but never used here — presumably a
    leftover hook for noise augmentation; confirm before removing it from
    callers.
    """
    rate, samples = wavfile.read(path)
    clipped = samples[: rate * 40]
    return torch.from_numpy(clipped).float().unsqueeze(0)

def compute_eer(labels, scores):
    """Return ``(eer, threshold)`` for a binary verification trial list.

    ``labels`` uses 1 for target trials and 0 for impostors.  The equal
    error rate is the operating point where the false-accept rate equals
    the false-reject rate, located as the root of ``1 - x - TPR(x)`` on
    the linearly interpolated ROC curve; the matching decision threshold
    is read off the same curve.
    """
    far, tpr, thresholds = roc_curve(labels, scores, pos_label=1)
    tpr_at = interp1d(far, tpr)
    thr_at = interp1d(far, thresholds)
    eer = brentq(lambda x: 1.0 - x - tpr_at(x), 0.0, 1.0)
    return eer, thr_at(eer)

if __name__ == "__main__":
    from main import Task
    parser = ArgumentParser()
    parser = Task.add_model_specific_args(parser)
    parser.add_argument('--snr', type=int, default=-1)
    parser.add_argument('--top_n', type=int, help='trial file path', default=128)
    parser.add_argument('--cohort_path', type=str, help='', default=None)
    parser.add_argument('--checkpoint_path1', type=str, help='', default=None)
    parser.add_argument('--checkpoint_path2', type=str, help='', default=None)
    parser.add_argument('--norm', action='store_true')

    # model args
    args = parser.parse_args()
    model = Task(**args.__dict__)

    # Average the weights of the two checkpoints (2-way "model soup").
    state_dict1 = torch.load(args.checkpoint_path1, map_location="cpu")["state_dict"]
    state_dict2 = torch.load(args.checkpoint_path2, map_location="cpu")["state_dict"]

    # Average by key rather than zipping the two .items() iterators:
    # zip silently mispairs tensors if the dicts ever disagree in
    # insertion order, and silently drops keys when lengths differ.
    # Indexing by key fails loudly (KeyError) on any mismatch instead.
    for key in state_dict1:
        state_dict1[key] = (state_dict1[key] + state_dict2[key]) * 0.5

    model.load_state_dict(state_dict1, strict=True)
    model = model.eval()
    model = model.cuda()

    # Trial file rows: <label> <enroll_path> <test_path>
    # (--trial_path is registered by Task.add_model_specific_args.)
    trials = np.loadtxt(args.trial_path, str)
    paths = np.unique(np.concatenate((trials.T[1], trials.T[2])))

    if args.norm:
        print("norm is true")
    else:
        print("norm is false")

    # Extract one embedding per unique utterance.
    table = {}
    embs = []
    for path in tqdm(paths):
        waveform = load_audio(path, args.snr)
        waveform = waveform.cuda()
        with torch.no_grad():
            embedding = model(waveform)
        embedding = embedding.detach().cpu().numpy()[0]
        embs.append(embedding)
        table[path] = embedding

    # Global embedding statistics; only `mean` is used when --norm is set.
    mean = np.mean(embs, axis=0)
    std = np.std(embs, axis=0)

    scores = []
    labels = []
    for label, enroll_path, test_path in tqdm(trials):
        enroll_vector = table[enroll_path]
        test_vector = table[test_path]
        if args.norm:
            # Mean-centering only; full standardization ((x - mean) / std)
            # was deliberately disabled in the original experiments.
            enroll_vector = enroll_vector - mean
            test_vector = test_vector - mean

        # Cosine similarity between enrollment and test embeddings.
        score = enroll_vector.dot(test_vector.T)
        denom = np.linalg.norm(enroll_vector) * np.linalg.norm(test_vector)
        score = score / denom

        scores.append(score)
        labels.append(int(label))

    EER, threshold = compute_eer(labels, scores)
    print("cosine EER: {:.2f}% with threshold {:.2f}\n".format(EER*100, threshold))


