import numpy as np
import matplotlib.pyplot as plt

from sklearn import metrics
from torch.utils.data import DataLoader

from .ModelUtil import *
from .DataProcess import *

import os

def evaluate_ROC(scores, y_true, threshold=0.5):
    """Print the ROC-AUC and thresholded accuracy of binary scores.

    Args:
        scores: array of per-sample scores/probabilities for the positive class.
        y_true: array of ground-truth binary labels.
        threshold: cut-off above which a score counts as a positive prediction.

    Returns:
        The accuracy at the given threshold.
    """
    auc_value = metrics.roc_auc_score(y_true, scores)
    print("AUC value is:", auc_value)
    y_pred = (scores > threshold).astype(int)
    accuracy = metrics.accuracy_score(y_true, y_pred)
    print("Accuracy is:", accuracy)
    return accuracy

def ROC_plot(scores, y_true):
    """Draw and display the ROC curve (with AUC in the legend) for binary scores."""
    fpr, tpr, _ = metrics.roc_curve(y_true, scores)
    area = metrics.roc_auc_score(y_true, scores)
    line_width = 2
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange', lw=line_width,
             label='ROC curve (area = %0.2f)' % area)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend(loc="lower right")
    plt.show()
    

def get_model_pred(dataloader, model, device):
    """Run `model` over `dataloader` and concatenate its softmax outputs.

    Args:
        dataloader: yields (X, y) batches; X is cast to float32 before the
            forward pass.
        model: a torch module. It may return a bare logits tensor, or a
            tuple/list whose first element is the logits (extra outputs,
            e.g. auxiliary losses or features, are ignored).
        device: device to move the batches to (model is assumed to already
            live there).

    Returns:
        (conf_data, label_data): softmax probabilities of shape
        (N, num_classes) and labels of shape (N,), both on `device`.
    """
    size = len(dataloader.dataset)
    softmax = nn.Softmax(dim=1)
    correct = 0
    conf_batches = []
    label_batches = []

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            X = X.to(torch.float32)
            X, y = X.to(device), y.to(device)

            # Single forward pass. The previous try/except cascade called
            # model(X) up to three times and its bare `except:` silently
            # swallowed real errors (e.g. shape or device mismatches).
            out = model(X)
            logits = out[0] if isinstance(out, (tuple, list)) else out

            pred = softmax(logits)
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            conf_batches.append(pred)
            label_batches.append(y)

    conf_data = torch.cat(conf_batches, 0)
    label_data = torch.cat(label_batches, 0)
    correct /= size
    print(f" Error: \n Accuracy: {(100*correct):>0.1f}%  \n")
    return conf_data, label_data

def get_model_logits(dataloader, model, device):
    """Run `model` over `dataloader` and concatenate its raw logits.

    Args:
        dataloader: yields (X, y) batches; X is cast to float32 before the
            forward pass.
        model: a torch module. It may return a bare logits tensor, or a
            tuple/list whose first element is the logits (extra outputs
            are ignored).
        device: device to move the batches to.

    Returns:
        (logits_data, label_data): logits of shape (N, num_classes) and
        labels of shape (N,), both on `device`.
    """
    size = len(dataloader.dataset)
    correct = 0
    logits_batches = []
    label_batches = []

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            X = X.to(torch.float32)
            X, y = X.to(device), y.to(device)

            # Single forward pass; replaces the bare try/except cascade
            # that re-ran the model and hid genuine runtime errors.
            out = model(X)
            logits = out[0] if isinstance(out, (tuple, list)) else out

            correct += (logits.argmax(1) == y).type(torch.float).sum().item()
            logits_batches.append(logits)
            label_batches.append(y)

    logits_data = torch.cat(logits_batches, 0)
    label_data = torch.cat(label_batches, 0)
    correct /= size
    print(f" Error: \n Accuracy: {(100*correct):>0.1f}%  \n")
    return logits_data, label_data






def get_model_loss(dataloader, model, loss_fn, device):
    """Run `model` over `dataloader` and concatenate per-sample losses.

    Args:
        dataloader: yields (X, y) batches; X is cast to float32 before the
            forward pass.
        model: a torch module. It may return a bare logits tensor, or a
            tuple/list whose first element is the logits (extra outputs
            are ignored).
        loss_fn: loss callable applied to (logits, y). NOTE(review): because
            the losses are concatenated with torch.cat, this must produce a
            per-sample loss vector (e.g. reduction='none') — a scalar-
            reducing loss will fail here; confirm against callers.
        device: device to move the batches to.

    Returns:
        (loss_data, label_data): per-sample losses of shape (N,) and labels
        of shape (N,), both on `device`.
    """
    size = len(dataloader.dataset)
    softmax = nn.Softmax(dim=1)
    correct = 0
    loss_batches = []
    label_batches = []

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            X = X.to(torch.float32)
            X, y = X.to(device), y.to(device)

            # Single forward pass; replaces the bare try/except cascade
            # that re-ran the model and hid genuine runtime errors.
            out = model(X)
            logits = out[0] if isinstance(out, (tuple, list)) else out

            # Most torch classification losses expect integer class targets.
            loss = loss_fn(logits, y.to(torch.int64))
            pred = softmax(logits)
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            loss_batches.append(loss)
            label_batches.append(y)

    loss_data = torch.cat(loss_batches, 0)
    label_data = torch.cat(label_batches, 0)
    correct /= size
    print(f" Error: \n Accuracy: {(100*correct):>0.1f}%  \n")
    return loss_data, label_data



def load_attack_data(weight_dir, model_index, model_num, data_name, model, epochs, transform, device, batch_size=64, prop_keep=0.14):
    """Load one shadow model's checkpoint and return its confidence outputs
    as membership-inference attack data.

    Looks up project helpers by name via globals(): `load_<data_name>` for
    the dataset split, `create_<model>_model` for the architecture, plus
    `CustomDataset` and `get_model_pred` defined in this package.

    Returns:
        conf_data: softmax outputs for train then test samples, as numpy.
        m_data: membership labels (1 = train/member, 0 = test/non-member).
        label_data: ground-truth class labels, as numpy.

    NOTE(review): `train_keep_exp`/`test_keep_exp` from the loader are
    unused here — presumably consumed by other callers; confirm.
    """
    # Prepare the raw data via the dataset-specific loader resolved from globals().
    (x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = globals()['load_{}'.format(data_name)](model_index, model_num, prop_keep=prop_keep, seed=0)
    print(x_train.shape,y_train.shape,x_test.shape,y_test.shape)
    training_data = CustomDataset(x_train, y_train, transform)
    test_data = CustomDataset(x_test, y_test, transform)
    # shuffle=False keeps row order aligned with the membership labels built below.
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=False)
    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
    # Build the shadow model; the factory signature differs per architecture.
    if model in ['NN', 'NN_4layer']:
        Shadow_Model = globals()['create_{}_model'.format(model)](x_train.shape[1], y_train.max()+1)
    elif model == 'CNN':
        Shadow_Model = globals()['create_{}_model'.format(model)](y_train.max()+1, data_name)
    else:
        Shadow_Model = globals()['create_{}_model'.format(model)](y_train.max()+1)
    # Load trained weights; these two datasets use a different checkpoint
    # naming scheme (no "shadownum100" segment) than the rest.
    if data_name == 'Purchase100_limited' or data_name == 'MNIST':
        weight_path = os.path.join(weight_dir, "{}_{}_epoch{}_model{}.pth".format(data_name, model, epochs, model_index))
    else:
        weight_path = os.path.join(weight_dir, "{}_{}_epoch{}_shadownum100_model{}.pth".format(data_name, model, epochs, model_index))
    # weight_path = os.path.join(weight_dir, "{}_{}_epoch{}_model{}.pth".format(data_name, model, epochs, model_index))
    Shadow_Model.load_state_dict(torch.load(weight_path))
    Shadow_Model.to(device)
    # Collect softmax outputs for members (train) and non-members (test).
    conf_data_train, label_data_train = get_model_pred(train_dataloader, Shadow_Model, device)
    conf_data_test, label_data_test = get_model_pred(test_dataloader, Shadow_Model, device)
    conf_data = torch.cat((conf_data_train, conf_data_test), 0)
    conf_data = conf_data.detach().cpu().numpy()
    label_data = torch.cat((label_data_train, label_data_test), 0)
    label_data = label_data.detach().cpu().numpy()
    # Membership ground truth: 1 for every train row, 0 for every test row.
    m_train = np.ones(y_train.shape[0])
    m_test = np.zeros(y_test.shape[0])
    m_data = np.concatenate((m_train, m_test), axis=0)
    return conf_data, m_data, label_data


def get_top_k_conf(k, train_x, test_x):
    """Reduce each row of both arrays to its k largest values.

    Args:
        k: number of top entries to keep per row.
        train_x: 2-D array of per-sample confidence vectors.
        test_x: 2-D array of per-sample confidence vectors.

    Returns:
        (train_x, test_x) with shape (n_rows, k); within each row the kept
        values are in ascending order, so the largest value is last.
    """
    # np.sort + tail slice is equivalent to the original argsort + fancy
    # indexing (gathering values at their own sort order just yields the
    # sorted values), without building the index matrices.
    train_x = np.sort(train_x, axis=1)[:, -k:]
    test_x = np.sort(test_x, axis=1)[:, -k:]
    return train_x, test_x