import torch
import scipy
import numpy as np

from torch.utils.data import DataLoader

from .AttackUtil import *
from .DataProcess import *
from .ModelUtil import *

import os

def cal_score(conf_data, label_data):
    """Compute the per-example LIRA score from model confidences.

    score = log(p_true + eps) - log(p_wrong + eps), where p_true is the
    confidence assigned to the true class and p_wrong is the total
    confidence assigned to all other classes. The tiny epsilon (1e-45)
    guards against log(0).

    Args:
        conf_data: (N, C) array of per-class confidences.
        label_data: (N,) array of integer true labels.

    Returns:
        (N,) array of LIRA scores.

    Note: unlike the original implementation, the input array is NOT
    modified in place (the original zeroed the true-class entries).
    """
    rows = np.arange(conf_data.shape[0])
    y_true = conf_data[rows, label_data]        # confidence of the true label
    # Sum of all other labels' confidences, computed without mutating
    # conf_data: total row sum minus the true-class entry.
    y_wrong = conf_data.sum(axis=1) - y_true
    return np.log(y_true + 1e-45) - np.log(y_wrong + 1e-45)


def load_conf_data(x_data, y_data, weight_dir, model_index, data_name, model, weight_part, transform, batch_size, device):
    """Load reference model #model_index and return its output confidences
    and the corresponding labels on (x_data, y_data).

    Args:
        x_data, y_data: input features and integer labels.
        weight_dir: directory holding the saved state dicts.
        model_index: which checkpoint to load (suffix of the filename).
        data_name: dataset name, forwarded to the CNN factory.
        model: architecture key, e.g. 'NN', 'NN_4layer', 'CNN', ...
        weight_part: filename prefix of the checkpoint.
        transform: transform applied by CustomDataset.
        batch_size: evaluation batch size.
        device: torch device to run inference on.

    Returns:
        conf_data: (N, C) numpy array of per-class confidences.
        label_data: (N,) numpy array of labels.
    """
    dataset = CustomDataset(x_data, y_data, transform)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    # Rebuild the architecture matching the stored weights.
    num_classes = y_data.max() + 1
    factory = globals()['create_{}_model'.format(model)]
    if model in ['NN', 'NN_4layer']:
        Reference_Model = factory(x_data.shape[1], num_classes)   # MLPs also need the input width
    elif model == 'CNN':
        Reference_Model = factory(num_classes, data_name)         # CNN factory also needs the dataset name
    else:
        Reference_Model = factory(num_classes)
    weight_path = os.path.join(weight_dir, weight_part + "{}.pth".format(model_index))
    # map_location keeps checkpoints saved on GPU loadable on CPU-only hosts.
    Reference_Model.load_state_dict(torch.load(weight_path, map_location=device))
    Reference_Model.to(device)
    # NOTE(review): the model is not put into eval() mode here; presumably
    # get_model_pred handles that — confirm.
    conf_data, label_data = get_model_pred(loader, Reference_Model, device)
    return conf_data.detach().cpu().numpy(), label_data.detach().cpu().numpy()

def load_logits_data(x_data, y_data, weight_dir, model_index, data_name, model, weight_part, transform, batch_size, device):
    """Load reference model #model_index and return its raw logits and the
    corresponding labels on (x_data, y_data).

    Same loading logic as load_conf_data, but returns logits
    (via get_model_logits) instead of confidences.

    Returns:
        logits_data: (N, C) numpy array of logits.
        label_data: (N,) numpy array of labels.
    """
    dataset = CustomDataset(x_data, y_data, transform)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    # Rebuild the architecture matching the stored weights.
    num_classes = y_data.max() + 1
    factory = globals()['create_{}_model'.format(model)]
    if model in ['NN', 'NN_4layer']:
        Reference_Model = factory(x_data.shape[1], num_classes)   # MLPs also need the input width
    elif model == 'CNN':
        Reference_Model = factory(num_classes, data_name)         # CNN factory also needs the dataset name
    else:
        Reference_Model = factory(num_classes)
    weight_path = os.path.join(weight_dir, weight_part + "{}.pth".format(model_index))
    # map_location keeps checkpoints saved on GPU loadable on CPU-only hosts.
    Reference_Model.load_state_dict(torch.load(weight_path, map_location=device))
    Reference_Model.to(device)
    logits_data, label_data = get_model_logits(loader, Reference_Model, device)
    return logits_data.detach().cpu().numpy(), label_data.detach().cpu().numpy()

def load_loss_data(x_data, y_data, loss_fn, weight_dir, model_index, data_name, model, weight_part, transform, batch_size, device):
    """Load reference model #model_index and return its per-example loss and
    the corresponding labels on (x_data, y_data).

    Same loading logic as load_conf_data, but evaluates loss_fn via
    get_model_loss instead of collecting confidences.

    Returns:
        loss_data: (N,) numpy array of per-example losses.
        label_data: (N,) numpy array of labels.
    """
    dataset = CustomDataset(x_data, y_data, transform)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    # Rebuild the architecture matching the stored weights.
    num_classes = y_data.max() + 1
    factory = globals()['create_{}_model'.format(model)]
    if model in ['NN', 'NN_4layer']:
        Reference_Model = factory(x_data.shape[1], num_classes)   # MLPs also need the input width
    elif model == 'CNN':
        Reference_Model = factory(num_classes, data_name)         # CNN factory also needs the dataset name
    else:
        Reference_Model = factory(num_classes)
    weight_path = os.path.join(weight_dir, weight_part + "{}.pth".format(model_index))
    # map_location keeps checkpoints saved on GPU loadable on CPU-only hosts.
    Reference_Model.load_state_dict(torch.load(weight_path, map_location=device))
    Reference_Model.to(device)
    loss_data, label_data = get_model_loss(loader, Reference_Model, loss_fn, device)
    return loss_data.detach().cpu().numpy(), label_data.detach().cpu().numpy()

def get_score_from_model(dataloader, model, device):
    """Run `model` over `dataloader` and return (confidences, LIRA scores).

    Returns:
        conf_data: (N, C) float64 numpy array of per-class confidences.
        score: (N,) numpy array produced by cal_score.
    """
    preds, labels = get_model_pred(dataloader, model, device)
    conf = preds.detach().cpu().numpy().astype(np.float64)
    label_arr = labels.detach().cpu().numpy()
    # Defensive copy: cal_score may modify its first argument in place.
    score = cal_score(conf.copy(), label_arr)
    return conf, score

def load_score_data_all(x_data, y_data, weight_dir, model_num, data_name, model, weight_part, transform, batch_size, device):
    """Collect confidences and LIRA scores across model_num reference models.

    Returns:
        conf_data_all: (model_num, N, C) float64 confidences.
        label_data: (N,) labels (identical for every model; last model's copy).
        score_all: (model_num, N) LIRA scores.

    Raises:
        ValueError: if model_num is not positive (the original crashed with
        UnboundLocalError in that case).
    """
    if model_num <= 0:
        raise ValueError("model_num must be positive")
    conf_list = []
    score_list = []
    label_data = None
    for i in range(model_num):
        conf_data, label_data = load_conf_data(x_data=x_data, y_data=y_data, weight_dir=weight_dir, model_index=i, data_name=data_name, model=model,
                                               weight_part=weight_part, transform=transform, batch_size=batch_size, device=device)
        conf_data = conf_data.astype(np.float64)
        # cal_score may mutate its first argument; hand it a copy.
        score_list.append(cal_score(conf_data.copy(), label_data))
        conf_list.append(conf_data)
    # Stack once after the loop instead of np.concatenate inside it
    # (avoids quadratic copying as model_num grows).
    return np.stack(conf_list, axis=0), label_data, np.stack(score_list, axis=0)

def load_loss_data_all(x_data, y_data, loss_fn, weight_dir, model_num, data_name, model, weight_part, transform, batch_size, device):
    """Collect per-example losses across model_num reference models.

    Returns:
        loss_data_all: (model_num, N) float64 losses.
        label_data: (N,) labels (identical for every model; last model's copy).

    Raises:
        ValueError: if model_num is not positive (the original crashed with
        UnboundLocalError in that case).
    """
    if model_num <= 0:
        raise ValueError("model_num must be positive")
    loss_list = []
    label_data = None
    for i in range(model_num):
        loss_data, label_data = load_loss_data(x_data=x_data, y_data=y_data, loss_fn=loss_fn, weight_dir=weight_dir, model_index=i, data_name=data_name, model=model,
                                               weight_part=weight_part, transform=transform, batch_size=batch_size, device=device)
        loss_list.append(loss_data.astype(np.float64))
    # Stack once after the loop instead of np.concatenate inside it
    # (avoids quadratic copying as model_num grows).
    return np.stack(loss_list, axis=0), label_data

def load_logits_data_all(x_data, y_data, weight_dir, model_num, data_name, model, weight_part, transform, batch_size, device):
    """Collect raw logits across model_num reference models.

    Returns:
        logits_data_all: (model_num, N, C) float64 logits.
        label_data: (N,) labels (identical for every model; last model's copy).

    Raises:
        ValueError: if model_num is not positive (the original crashed with
        UnboundLocalError in that case).
    """
    if model_num <= 0:
        raise ValueError("model_num must be positive")
    logits_list = []
    label_data = None
    for i in range(model_num):
        logits_data, label_data = load_logits_data(x_data=x_data, y_data=y_data, weight_dir=weight_dir, model_index=i, data_name=data_name, model=model,
                                                   weight_part=weight_part, transform=transform, batch_size=batch_size, device=device)
        logits_list.append(logits_data.astype(np.float64))
    # Stack once after the loop instead of np.concatenate inside it
    # (avoids quadratic copying as model_num grows).
    return np.stack(logits_list, axis=0), label_data


def LIRA_attack(keep, score_all, target_score, target):
    """LiRA-style membership inference with a global (pooled) std estimate.

    For each example, the scores from models that trained on it ("in") and
    models that did not ("out") are summarized by their medians; a single
    pooled std is used for each population. The membership signal is the
    log-likelihood ratio of the target score under the two Gaussians.

    Args:
        keep: (num_models, N) boolean mask; keep[m, i] is True iff model m
            was trained on example i. Each column must have the same number
            of True entries (np.array over the per-column slices requires it).
        score_all: (num_models, N) scores of the reference models.
        target_score: (N,) scores of the target model.
        target: unused; kept for interface compatibility with callers.

    Returns:
        (N,) array of log-likelihood-ratio membership scores (higher =>
        more likely a member).
    """
    dat_in = []
    dat_out = []
    num_examples = score_all.shape[1]
    for i in range(num_examples):
        dat_in.append(score_all[keep[:, i], i])
        dat_out.append(score_all[~keep[:, i], i])
    dat_in = np.array(dat_in)
    dat_out = np.array(dat_out)
    mean_in = np.median(dat_in, 1)
    mean_out = np.median(dat_out, 1)
    std_in = np.std(dat_in)
    # BUG FIX: the original computed std_out from dat_in (copy-paste error),
    # making the "out" Gaussian use the "in" population's spread.
    std_out = np.std(dat_out)
    prediction = []
    for i in range(num_examples):
        # 1e-30 guards against a zero std producing an invalid pdf.
        pr_in = scipy.stats.norm.logpdf(target_score[i], mean_in[i], std_in + 1e-30)
        pr_out = scipy.stats.norm.logpdf(target_score[i], mean_out[i], std_out + 1e-30)
        prediction.append(pr_in - pr_out)
    return np.array(prediction)

def get_risk_score(loss_all, train_keep):
    """Per-example risk: absolute gap between the median "out" and "in" loss.

    Args:
        loss_all: (num_models, N) per-model losses.
        train_keep: (num_models, N) boolean membership mask.

    Returns:
        (N,) array of non-negative risk scores.
    """
    num_examples = loss_all.shape[1]
    members = [loss_all[train_keep[:, j], j] for j in range(num_examples)]
    non_members = [loss_all[~train_keep[:, j], j] for j in range(num_examples)]
    median_in = np.median(np.array(members), 1)
    median_out = np.median(np.array(non_members), 1)
    gap = median_out - median_in
    # np.maximum(x, -x) == |x|
    return np.maximum(gap, -gap)

def get_loss_threshold(loss_all, train_keep):
    """Per-example loss threshold: midpoint of the median "in" and "out" loss.

    Args:
        loss_all: (num_models, N) per-model losses.
        train_keep: (num_models, N) boolean membership mask.

    Returns:
        (N,) array of thresholds, one per example.
    """
    num_examples = loss_all.shape[1]
    members = [loss_all[train_keep[:, j], j] for j in range(num_examples)]
    non_members = [loss_all[~train_keep[:, j], j] for j in range(num_examples)]
    median_in = np.median(np.array(members), 1)
    median_out = np.median(np.array(non_members), 1)
    return (median_out + median_in) / 2




# 基于离群点的LIRA攻击
# Outlier-based LIRA attack
def out_LIRA_attack(score_all, train_keep, out_score, target_score, target, out_ratio = 0.01):
    """Run the LIRA attack only on the top out_ratio fraction of examples
    ranked by out_score (descending), and evaluate the resulting ROC.

    Args:
        score_all: (num_models, N) reference-model scores.
        train_keep: (num_models, N) boolean membership mask.
        out_score: (N,) outlier score; larger means more of an outlier.
        target_score: (N,) target-model scores.
        target: (N,) ground-truth membership labels for evaluation.
        out_ratio: fraction of examples to attack (default 1%).

    Returns:
        prediction: per-example membership scores for the selected outliers
        (the original computed this and discarded it).
    """
    # Indices of the examples with the highest out_score come first.
    ranked = np.flip(np.argsort(out_score))
    n_out = int(score_all.shape[1] * out_ratio)
    sel = ranked[:n_out]
    prediction = LIRA_attack(train_keep[:, sel], score_all[:, sel], target_score[sel], target[sel])
    evaluate_ROC(prediction, target[sel], threshold=0)
    return prediction


