import torch
import torch.nn as nn
import numpy as np

from torchvision import transforms
from torch.utils.data import DataLoader

from .AttackUtil import *
from .DataProcess import *

import os

class Attack_NN(nn.Module):
    """Binary membership classifier: a 3-layer MLP whose output is squashed
    through a sigmoid, so ``forward`` returns probabilities in (0, 1).

    Args:
        input_dim: size of each input feature vector.
        num_classes: number of output units (1 for the binary membership task).
    """

    def __init__(self, input_dim, num_classes):
        super().__init__()
        # Layers are created in this exact order so seeded weight
        # initialization stays reproducible.
        layers = [
            nn.Linear(input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, num_classes),
        ]
        self.linear_relu_stack = nn.Sequential(*layers)

    def forward(self, x):
        # Raw logits -> sigmoid probabilities.
        return torch.sigmoid(self.linear_relu_stack(x))


def train_attack_model(dataloader, model, loss_fn, optimizer, device):
    """Run one training epoch of the attack model.

    Args:
        dataloader: yields (features, membership_label) batches.
        model: attack network; outputs probabilities (see Attack_NN).
        loss_fn: loss taking (prediction, target), e.g. nn.BCELoss.
        optimizer: optimizer over ``model``'s parameters.
        device: torch device to run on.
    """
    n_samples = len(dataloader.dataset)
    model.train()
    for step, (features, targets) in enumerate(dataloader):
        # Inputs may arrive as float64 numpy-backed tensors; the model's
        # weights are float32, so cast before the forward pass.
        features = features.to(torch.float32)
        features, targets = features.to(device), targets.to(device)
        # BCELoss expects prediction and target with matching shapes.
        targets = targets.reshape(len(targets), 1)

        scores = model(features)
        # Both sides are cast to float64 so the loss dtypes match.
        loss = loss_fn(scores.to(torch.float64), targets.to(torch.float64))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Progress report every 100 batches.
        if step % 100 == 0:
            seen = (step + 1) * len(features)
            print(f"loss: {loss.item():>7f}  [{seen:>5d}/{n_samples:>5d}]")


def evaluate_attack(dataloader, model, loss_fn, device):
    """Evaluate the attack model: print and return accuracy and average loss.

    Fixes over the previous version:
      * the computed metrics are now returned (they were printed and then
        discarded, making the function unusable programmatically);
      * inputs are cast to float32 before the forward pass, matching
        train_attack_model, so a float64 dataset that trains fine no longer
        fails at evaluation time.

    Args:
        dataloader: yields (features, membership_label) batches.
        model: attack network; outputs probabilities (see Attack_NN).
        loss_fn: loss taking (prediction, target), e.g. nn.BCELoss.
        device: torch device to run on.

    Returns:
        (accuracy, avg_loss): fraction of correct 0.5-thresholded predictions
        over the whole dataset, and the loss averaged over batches.
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0.0, 0.0
    with torch.no_grad():
        for X, y in dataloader:
            X = X.to(torch.float32)  # keep dtype consistent with training
            X, y = X.to(device), y.to(device)
            y = y.reshape(len(y), 1)
            pred = model(X)
            test_loss += loss_fn(pred.to(torch.float64), y.to(torch.float64)).item()
            # Threshold probabilities at 0.5 and count exact matches.
            correct += ((pred > 0.5).type(torch.float) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    return correct, test_loss


def get_attack_pred(dataloader, model, device):
    """Collect attack-model scores and membership labels for a whole loader.

    Fixes over the previous version:
      * ``pred.to(torch.float64)`` discarded its result (``Tensor.to`` is not
        in-place); the cast is now assigned, so the returned scores really
        are float64 as intended;
      * batch outputs are accumulated in lists and concatenated once instead
        of repeatedly ``torch.cat``-ing (which was accidentally quadratic).

    Args:
        dataloader: yields (features, membership_label) batches; must be
            non-empty.
        model: attack network; outputs probabilities (see Attack_NN).
        device: torch device to run on.

    Returns:
        attack_scores: (N, 1) float64 tensor of model outputs.
        mem_data: (N, 1) tensor of the corresponding membership labels.
    """
    model.eval()
    score_chunks, label_chunks = [], []
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            y = y.reshape(len(y), 1)
            pred = model(X).to(torch.float64)
            score_chunks.append(pred)
            label_chunks.append(y)
    attack_scores = torch.cat(score_chunks, 0)
    mem_data = torch.cat(label_chunks, 0)
    return attack_scores, mem_data


# Membership inference attack -- shadow-model approach.
def shadow_attack(sha_models, tar_model, model_num, weight_dir, data_name, model, model_transform, model_epochs, 
batch_size, learning_rate, attack_epochs, attack_transform, device, prop_keep=0.14, top_k=3, attack_class=False):
    """Train and evaluate membership-inference attack model(s) from shadow models.

    Confidence vectors from the shadow models listed in `sha_models` form the
    attack training set; the target model `tar_model` supplies the attack
    test set. Two modes:

      * attack_class=True: one Attack_NN per class label, each trained on the
        (optionally top-k truncated) confidence vectors of that class;
        per-class and aggregate ROC results are printed, and 0 is returned.
      * attack_class=False (default): a single Attack_NN trained on the
        confidence vectors augmented with a correct/incorrect-prediction bit;
        the trained model is returned.

    Args:
        sha_models: iterable of shadow-model indices to load.
        tar_model: index of the target model (source of attack test data).
        model_num, weight_dir, data_name, model, model_transform, model_epochs:
            forwarded to load_attack_data (defined elsewhere; semantics not
            visible in this file).
        batch_size: attack-model DataLoader batch size.
        learning_rate: Adam learning rate for the attack model.
        attack_epochs: number of attack-model training epochs.
        attack_transform: transform passed to CustomDataset for attack inputs.
        device: torch device for attack-model training/evaluation.
        prop_keep: forwarded to load_attack_data — presumably the fraction of
            data kept; confirm in DataProcess.
        top_k: if truthy, keep only the top-k confidence values per sample
            (via get_top_k_conf).
        attack_class: select the per-class mode described above.

    Returns:
        0 in per-class mode; otherwise the trained Attack_NN model.
    """
    i = 0
    # Prepare the data used to train the attack model.
    # load_attack_data presumably returns (confidence vectors, membership
    # labels, class labels) for one shadow model -- confirm in DataProcess.
    for sha_model_index in sha_models:
        conf_data, m_data, label_data = load_attack_data(weight_dir, sha_model_index, model_num, data_name, model, model_epochs, model_transform, device, prop_keep=prop_keep)
        print(conf_data.shape, m_data.shape, label_data.shape)
        if i==0:
            conf_data_train =  conf_data
            label_data_train = label_data
            m_data_train = m_data
            i = i+1
        else:
            conf_data_train = np.concatenate((conf_data_train, conf_data), 0)
            label_data_train = np.concatenate((label_data_train, label_data), 0)
            m_data_train = np.concatenate((m_data_train, m_data), 0)
            i = i+1

    # Prepare the attack-model test data from the target model.
    conf_data_test, m_data_test, label_data_test = load_attack_data(weight_dir, tar_model, model_num, data_name, model, model_epochs, model_transform, device, prop_keep=prop_keep)
    print('test data:', conf_data_test.shape, m_data_test.shape, label_data_test.shape)

    # 0/1 flag: did the (shadow/target) model classify the sample correctly?
    pred_cor_train = (conf_data_train.argmax(1) == label_data_train)
    pred_cor_train = pred_cor_train.astype(int)
    pred_cor_test = (conf_data_test.argmax(1) == label_data_test)
    pred_cor_test = pred_cor_test.astype(int)


    if attack_class:
        train_indices = np.arange(len(conf_data_train))
        test_indices = np.arange(len(conf_data_test))      # index every sample in the train/test sets
        unique_classes = np.unique(label_data_train)
        i = 0
        for c in unique_classes:
            print ('Training attack model for class {}...'.format(c))
            # Select only the samples whose true class label is c.
            c_train_indices = train_indices[label_data_train == c]
            c_train_x, c_train_y = conf_data_train[c_train_indices], m_data_train[c_train_indices]
            c_test_indices = test_indices[label_data_test == c]
            c_test_x, c_test_y = conf_data_test[c_test_indices], m_data_test[c_test_indices]
            
            if top_k:
                # Use only the top-k values of each confidence vector.
                c_train_x, c_test_x = get_top_k_conf(top_k, c_train_x, c_test_x)

            training_data = CustomDataset(c_train_x, c_train_y, attack_transform)
            test_data = CustomDataset(c_test_x, c_test_y, attack_transform)

            # Create data loaders.
            train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
            test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

            # Build the per-class attack model (binary output).
            Attack_Model = Attack_NN(c_train_x.shape[1], 1).to(device)
            print(Attack_Model)

            # Train model
            loss_fn = nn.BCELoss()
            optimizer = torch.optim.Adam(Attack_Model.parameters(), lr=learning_rate)  
            for t in range(attack_epochs):
                print(f"Epoch {t+1}\n-------------------------------")
                train_attack_model(train_dataloader, Attack_Model, loss_fn, optimizer, device)
            print("Done!")  

            # Evaluation for this class (ROC on train and test splits).
            print("Test for class {}".format(c))
            print("Train data:")
            attack_train_scores, attack_train_mem = get_attack_pred(train_dataloader, Attack_Model, device)
            attack_train_scores, attack_train_mem = attack_train_scores.detach().cpu().numpy(), attack_train_mem.detach().cpu().numpy()
            evaluate_ROC(attack_train_scores, attack_train_mem)

            print("Test data:")
            attack_test_scores, attack_test_mem = get_attack_pred(test_dataloader, Attack_Model, device)
            attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()
            evaluate_ROC(attack_test_scores, attack_test_mem)

            # Accumulate per-class scores for the aggregate evaluation below.
            if i==0:
                attack_train_scores_all = attack_train_scores
                attack_train_mem_all = attack_train_mem
                attack_test_scores_all = attack_test_scores
                attack_test_mem_all = attack_test_mem
                i = i+1
            else:
                attack_train_scores_all = np.concatenate((attack_train_scores_all, attack_train_scores), 0)
                attack_train_mem_all = np.concatenate((attack_train_mem_all, attack_train_mem), 0)
                attack_test_scores_all = np.concatenate((attack_test_scores_all, attack_test_scores), 0)
                attack_test_mem_all = np.concatenate((attack_test_mem_all, attack_test_mem), 0)
                i = i+1

        print("Test for all classes")
        print("Train data:")
        evaluate_ROC(attack_train_scores_all, attack_train_mem_all)
        print("Test data:")
        evaluate_ROC(attack_test_scores_all, attack_test_mem_all)

        return 0

    else:
        # Train a single attack model over all classes.
        print(conf_data_train.shape, m_data_train.shape)

        if top_k:
            # Use only the top-k values of each confidence vector.
            conf_data_train, conf_data_test = get_top_k_conf(top_k, conf_data_train, conf_data_test)

        
        # data_train_X = np.concatenate((conf_data_train, label_data_train.reshape(label_data_train.shape[0],1)), 1)
        # data_test_X = np.concatenate((conf_data_test, label_data_test.reshape(label_data_test.shape[0],1)), 1)
        
        # Append the correct/incorrect-prediction bit as an extra feature.
        data_train_X = np.concatenate((conf_data_train, pred_cor_train.reshape(pred_cor_train.shape[0],1)), 1)
        data_test_X = np.concatenate((conf_data_test, pred_cor_test.reshape(pred_cor_test.shape[0],1)), 1)

        # data_train_X = conf_data_train
        # data_test_X = conf_data_train
        
        data_train_X = data_train_X.astype(np.float32)
        data_test_X = data_test_X.astype(np.float32)
        training_data = CustomDataset(data_train_X, m_data_train, attack_transform)
        test_data = CustomDataset(data_test_X, m_data_test, attack_transform)

        # Create data loaders.
        train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
        test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

        # Build the attack model (binary output).
        Attack_Model = Attack_NN(data_train_X.shape[1], 1).to(device)
        print(Attack_Model)

        # Train model
        loss_fn = nn.BCELoss()
        optimizer = torch.optim.Adam(Attack_Model.parameters(), lr=learning_rate)  
        for t in range(attack_epochs):
            print(f"Epoch {t+1}\n-------------------------------")
            train_attack_model(train_dataloader, Attack_Model, loss_fn, optimizer, device)
        print("Done!")  

        # Evaluation: ROC on the attack train and test splits.
        print("Train data:")
        attack_train_scores, attack_train_mem = get_attack_pred(train_dataloader, Attack_Model, device)
        attack_train_scores, attack_train_mem = attack_train_scores.detach().cpu().numpy(), attack_train_mem.detach().cpu().numpy()
        evaluate_ROC(attack_train_scores, attack_train_mem)

        print("Test data:")
        attack_test_scores, attack_test_mem = get_attack_pred(test_dataloader, Attack_Model, device)
        attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()
        evaluate_ROC(attack_test_scores, attack_test_mem)
        
        return Attack_Model


def shadow_attack_simp(conf_data_target, m_data_target, label_data_target, attack_model, attack_transform, batch_size, device, top_k=3):
    """Evaluate a pre-trained attack model on target-model confidence data.

    Truncates each confidence vector to its top-k values, runs the attack
    model over the resulting dataset, and prints the ROC evaluation.
    Note: `label_data_target` is accepted for signature compatibility but
    not used here.
    """
    top_conf, _ = get_top_k_conf(top_k, conf_data_target, conf_data_target)
    loader = DataLoader(
        CustomDataset(top_conf, m_data_target, attack_transform),
        batch_size=batch_size,
        shuffle=False,
    )

    print("Test data:")
    scores, membership = get_attack_pred(loader, attack_model, device)
    scores = scores.detach().cpu().numpy()
    membership = membership.detach().cpu().numpy()
    evaluate_ROC(scores, membership)