from math import fabs
import torch
from models import *
import os
from torchattacks import PGD, FGSM
from tqdm import tqdm
from torchvision.models import resnet50, ResNet50_Weights
import time

class Network:
    """
    create and train/test a network.
    """
    def __init__(self, dataer, param):
        """
        Build the network selected by ``param['model_arch']`` and either load
        its checkpoint from disk or train it from scratch.

        dataer : data provider exposing ``get_loader`` / ``dataset_name``
            (assumed from usage in the training/test methods — confirm).
        param (dict) : Include the parameters for network
            (model_arch, device, exp_type, epochs, lr, batch_size, sigma, ...).
        """
        self.dataer = dataer
        # Counts samples sent through get_prediction_score (query budget).
        self.query = 0

        self.root_path = './data'
        self.param = param
        self.model_arch = param['model_arch']
        self.device = param['device']
        # NOTE: attribute spelling ("cheackpoint") kept for compatibility with
        # other code that reads this attribute.
        self.cheackpoint_path = os.path.join(self.root_path, 'checkpoint')
        if not os.path.exists(self.cheackpoint_path):
            os.makedirs(self.cheackpoint_path)

        ## Model Architecture
        if self.model_arch == 'VGG16':
            self.net = VGG('VGG16').to(self.device)

        elif self.model_arch == 'VGG19':
            self.net = VGG('VGG19').to(self.device)

        elif self.model_arch == 'ResNet34':
            self.net = ResNet34().to(self.device)

        elif self.model_arch == 'GoogLeNet':
            self.net = GoogLeNet().to(self.device)

        elif self.model_arch == 'LeNet':
            self.net = LeNet().to(self.device)

        elif self.model_arch == 'AlexNet':
            self.net = AlexNet().to(self.device)

        elif self.model_arch == 'NiN':
            self.net = NIN(10).to(self.device)

        # All adversarially/noise-trained variants share the ResNet18 backbone;
        # the name only selects the training regime and checkpoint file.
        elif self.model_arch in ('ResNet18', 'adv_ResNet18', 'adv_pgd_ResNet18',
                                 'adv_fgsm_ResNet18', 'noise_ResNet18'):
            self.net = ResNet18().to(self.device)

        elif self.model_arch == 'CNN':
            self.net = CNN().to(self.device)
            print(self.net)

        elif self.model_arch == 'ResNet50':
            self.net = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1).to(self.device)

        elif self.model_arch == 'ResNet50_V2':
            self.net = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2).to(self.device)

        else:
            # BUG FIX: was a bare "raise ValueError" with no diagnostic.
            raise ValueError('Unknown model_arch: {}'.format(self.model_arch))

        ## Train or Load (the torchvision ResNet50 weights are already pretrained).
        if self.model_arch not in ['ResNet50', 'ResNet50_V2']:
            path = os.path.join(self.cheackpoint_path, self.generate_name())
            if os.path.exists(path):
                self.Load_model(path)
            else:
                print('Not Found the CheckPoint, Now Train and Save the Model to Path <{}>'.format(path))

                if self.param['exp_type'] == 'robust':
                    ## adversarial training the model
                    self.Train_robust_model(path)

                elif self.param['exp_type'] == 'gaussian':
                    ## gaussian augmentation for model
                    self.Train_noise_model(path)
                else:
                    ## traditional training the model
                    self.Train_model(path)

                # BUG FIX: was "'…<{}>',format(path)" — the comma made print
                # emit the raw template and the unformatted path separately.
                print('Successfully Save the Model into Path <{}>'.format(path))
                print('Save Done')

        self.net.eval()

    def Train_noise_model(self, path):
        """
        Train with Gaussian-noise data augmentation.

        Every batch is perturbed with N(0, sigma^2) noise and clamped back to
        [0, 1] before the forward pass. The weights are saved to ``path``
        whenever BOTH the clean and the noisy test accuracies improve.
        """
        epochs = self.param['epochs']
        lr = self.param['lr']
        batch_size = self.param['batch_size']
        sigma = self.param['sigma']
        criterion = torch.nn.CrossEntropyLoss()
        train_loader = self.dataer.get_loader(isTrain=True, batch_size=batch_size, isShuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

        best_acc = 0.0
        best_noise_acc = 0.0
        for epoch in range(1, epochs + 1):

            self.net.train()
            train_loss = 0.0
            correct = 0
            total = 0

            for batch_idx, (inputs, targets) in enumerate(train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                # randn_like draws the noise directly on the input's device and
                # dtype (avoids a CPU allocation followed by a transfer).
                noise_inputs = torch.clamp(inputs + sigma * torch.randn_like(inputs), min=0.0, max=1.0).detach()
                optimizer.zero_grad()
                outputs = self.net(noise_inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

            # BUG FIX: the displayed epoch total was "epochs + 1" (off by one).
            print('Epoch : [{}/{}], Loss : {:.3f}, Train Acc : {:.2f}%'.format(epoch, epochs, train_loss / (batch_idx + 1), 100. * correct / total))

            scheduler.step()
            acc = self.Test_model()
            noise_acc = self.Noise_test_model()

            if acc > best_acc and noise_acc > best_noise_acc:
                print('Saving')
                self.Save_model(path)
                best_acc = acc
                best_noise_acc = noise_acc

    def Train_robust_model(self, path):
        """
        Adversarial training: each batch is replaced by adversarial examples
        crafted on-the-fly by PGD or FGSM (selected from ``model_arch``).

        Uses a manual step LR decay (x0.1 at epochs 80 and 120) and saves the
        weights to ``path`` whenever BOTH the clean and the robust test
        accuracies improve.
        """
        epochs = self.param['epochs']
        lr = self.param['lr']
        batch_size = self.param['batch_size']
        criterion = torch.nn.CrossEntropyLoss()
        train_loader = self.dataer.get_loader(isTrain=True, batch_size=batch_size, isShuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

        if self.model_arch == 'adv_pgd_ResNet18':
            atk = PGD(self.net, eps=2/255, alpha=1/255, steps=5)

        elif self.model_arch == 'adv_fgsm_ResNet18':
            atk = FGSM(self.net, eps=2/255)

        else:
            # BUG FIX: 'atk' was previously left unbound for any other
            # architecture and crashed with NameError inside the training loop.
            raise ValueError('No attack configured for model_arch: {}'.format(self.model_arch))

        best_acc = 0.0
        best_robust_acc = 0.0
        for epoch in range(1, epochs + 1):

            self.net.train()
            train_loss = 0.0
            correct = 0
            total = 0

            for batch_idx, (inputs, targets) in enumerate(tqdm(train_loader)):
                inputs = inputs.to(self.device)
                targets = targets.to(self.device)

                # Craft adversarial examples against the current weights.
                adv_inputs = atk(inputs, targets)

                optimizer.zero_grad()
                outputs = self.net(adv_inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

            # BUG FIX: the displayed epoch total was "epochs + 1" (off by one).
            print('Epoch : [{}/{}], Loss : {:.3f}, Train Acc : {:.2f}%, LR : {:.4f}'.format(epoch, epochs, train_loss / (batch_idx + 1), 100. * correct / total, optimizer.param_groups[0]['lr']))

            ## Manual step decay (cosine annealing deliberately disabled here).
            if epoch == 80 or epoch == 120:
                optimizer.param_groups[0]['lr'] *= 0.1
            acc = self.Test_model()
            robust_acc = self.Robust_test_model(atk)
            if acc > best_acc and robust_acc > best_robust_acc:
                print('Saving')
                self.Save_model(path)
                best_acc = acc
                best_robust_acc = robust_acc

    def Train_model(self, path):
        """
        Standard supervised training with SGD + cosine-annealing LR schedule.

        Saves the weights to ``path`` whenever the clean test accuracy improves.
        """
        epochs = self.param['epochs']
        lr = self.param['lr']
        batch_size = self.param['batch_size']
        criterion = torch.nn.CrossEntropyLoss()
        train_loader = self.dataer.get_loader(isTrain=True, batch_size=batch_size, isShuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

        best_acc = 0.0
        for epoch in range(1, epochs + 1):

            self.net.train()
            train_loss = 0.0
            correct = 0
            total = 0

            for batch_idx, (inputs, targets) in enumerate(train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                optimizer.zero_grad()
                outputs = self.net(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

            # BUG FIX: the displayed epoch total was "epochs + 1" (off by one).
            print('Epoch : [{}/{}], Loss : {:.3f}, Train Acc : {:.2f}%'.format(epoch, epochs, train_loss / (batch_idx + 1), 100. * correct / total))

            scheduler.step()
            acc = self.Test_model()

            if acc > best_acc:
                print('Saving')
                self.Save_model(path)
                best_acc = acc

    def Save_model(self, path):
        torch.save(self.net.state_dict(), path)

    def Load_model(self, path):
        print('Load Model form Path : <{}>'.format(path))
        self.net.load_state_dict(torch.load(path))
        print('Load Done')

    def Test_model(self, ):
        """
        Evaluate the network on the clean test set.

        Returns the top-1 accuracy as a fraction in [0, 1] (0.0 for an empty
        test loader).
        """
        self.net.eval()
        test_loss = 0
        correct = 0
        total = 0
        num_batches = 0
        criterion = torch.nn.CrossEntropyLoss()
        test_loader = self.dataer.get_loader(isTrain=False, isShuffle=False, batch_size=100)
        with torch.no_grad():
            for inputs, targets in tqdm(test_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.net(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                num_batches += 1
        # BUG FIX: the average previously divided by a loop variable leaked out
        # of the for-loop, which raised NameError on an empty loader.
        if num_batches == 0:
            return 0.0
        print('Loss : {:.3f}, Acc : {:.2f}%'.format(test_loss / num_batches, 100. * correct / total))
        return correct / total
    
    def Noise_test_model(self, ):
        """
        Evaluate accuracy under Gaussian input noise: every test batch is
        perturbed by N(0, sigma^2) noise and clamped to [0, 1] before the
        forward pass.

        Returns the top-1 accuracy as a fraction in [0, 1] (0.0 for an empty
        test loader).
        """
        self.net.eval()
        sigma = self.param['sigma']
        test_loss = 0
        correct = 0
        total = 0
        num_batches = 0
        criterion = torch.nn.CrossEntropyLoss()
        test_loader = self.dataer.get_loader(isTrain=False, isShuffle=False, batch_size=100)
        with torch.no_grad():
            for inputs, targets in tqdm(test_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                # randn_like draws the noise directly on the input's device/dtype.
                noise_inputs = torch.clamp(inputs + sigma * torch.randn_like(inputs), min=0.0, max=1.0).detach()
                outputs = self.net(noise_inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
                num_batches += 1
        # BUG FIX: previously relied on the loop variable leaking out of the
        # for-loop, which raised NameError on an empty loader.
        if num_batches == 0:
            return 0.0
        print('Loss : {:.3f}, Noise Acc : {:.2f}%'.format(test_loss / num_batches, 100. * correct / total))
        return correct / total

    def Robust_test_model(self, atk):
        """
        Evaluate robust accuracy: each test batch is replaced by adversarial
        examples generated by ``atk`` before classification. No torch.no_grad
        wrapper here because the attack needs gradients to craft its examples.

        Returns the top-1 accuracy on adversarial inputs as a fraction in [0, 1].
        """
        self.net.eval()
        running_loss = 0.0
        hit = 0
        seen = 0
        criterion = torch.nn.CrossEntropyLoss()
        test_loader = self.dataer.get_loader(isTrain=False, isShuffle=False, batch_size=100)

        for step, (images, labels) in enumerate(tqdm(test_loader)):
            images, labels = images.to(self.device), labels.to(self.device)
            adv_images = atk(images, labels)
            logits = self.net(adv_images)
            running_loss += criterion(logits, labels).item()
            _, guesses = logits.max(1)
            seen += labels.size(0)
            hit += guesses.eq(labels).sum().item()
        print('Loss : {:.3f}, Robust Acc : {:.2f}%'.format(running_loss / (step + 1), 100. * hit / seen))
        return hit / seen

    def get_prediction_score(self, samples):
        
        self.net.eval()
        self.query += samples.shape[0]
        with torch.no_grad():
            samples = samples.to(self.device)
            output = self.net(samples)
        
        softmax_layer = torch.nn.Softmax(dim=1)
        probability_output  = softmax_layer(output)

        return probability_output.detach().cpu()
    
    def adversarial_test(self, perturbation, target, origin_label):
        self.net.eval()
        correct = 0
        total = 0
        test_loader = self.dataer.get_class_loader(origin_label, isTrain=False, batch_size=100)
        with torch.no_grad():
            for batch_idx, (inputs, _) in enumerate(test_loader):
                inputs = torch.clamp(inputs + perturbation, min=0.0, max=1.0)
                adv_targets = torch.tensor([target for i in range(inputs.shape[0])])
                inputs, adv_targets = inputs.to(self.device), adv_targets.to(self.device)
                outputs = self.net(inputs)
                _, predicted = outputs.max(1)
                total += adv_targets.size(0)
                correct += predicted.eq(adv_targets).sum().item()
        return correct / total * 100       
    
    def generate_name(self, ):
        """
        Build the checkpoint filename: '<dataset>_<arch>.pth', with the noise
        level embedded for gaussian-augmentation experiments.
        """
        base = self.dataer.dataset_name
        if self.param['exp_type'] == 'gaussian':
            return base + '_' + 'sigma' + '_{:.4f}_'.format(self.param['sigma']) + self.model_arch + '.pth'
        return base + '_' + self.model_arch + '.pth'
    
    def isTargetAttckSuccessful(self, origin_sample, perturbations, target_class):
        """
        支持单原始样本，多扰动共同判断是否攻击成功
        以CIFAR-10为例
        origin_sample (1, 3, 32, 32)
        perturbations (batch, 3, 32, 32)
        """
        if origin_sample.shape[0] != 1:
            raise Exception('the input batch for origin_sample or perturbation are not 1, the function not support')
        
        self.net.eval()

        ## 广播是原始样本扩展到扰动上，扰动的维度高
        adv_samples = torch.clamp(origin_sample + perturbations, min=0.0, max=1.0).to(self.device)     
           
        with torch.no_grad():
            output = self.net(adv_samples)
            _, pred = output.max(1)

        Succ_index = []

        for index, label in enumerate(pred):
            if label == target_class:
                Succ_index.append(index)
                
        if len(Succ_index) != 0:
            return True, Succ_index
        else:
            return False, Succ_index

    def isUntargetAttackSuccessful(self, origin_sample, perturbations, source_class):
        """
        支持单原始样本，多扰动共同判断是否攻击成功
        以CIFAR-10为例
        origin_sample (1, 3, 32, 32)
        perturbations (batch, 3, 32, 32)
        """
        if origin_sample.shape[0] != 1:
            raise Exception('the input batch for origin_sample or perturbation are not 1, the function not support')
        
        self.net.eval()

        ## 广播是原始样本扩展到扰动上，扰动的维度高
        adv_samples = torch.clamp(origin_sample + perturbations, min=0.0, max=1.0).to(self.device)
        
        with torch.no_grad():
            output = self.net(adv_samples)
            _, pred = output.max(1)

        Succ_index = []

        for index, label in enumerate(pred):
            if label != source_class:
                Succ_index.append(index)
                
        if len(Succ_index) != 0:
            return True, Succ_index
        else:
            return False, Succ_index
    
    def clear_query(self, ):
        """Reset the counter that tracks how many samples have been queried."""
        self.query = 0

    def Reload_network(self, model_arch='ResNet18'):
        """
        Swap the wrapped network for a different architecture: reset the query
        counter, rebuild the model, and load (or train) its checkpoint.
        Mirrors the constructor's model-selection logic.
        """
        self.query = 0
        self.model_arch = model_arch

        self.cheackpoint_path = os.path.join(self.root_path, 'checkpoint')
        if not os.path.exists(self.cheackpoint_path):
            os.makedirs(self.cheackpoint_path)

        ## Model Architecture
        if self.model_arch == 'VGG16':
            self.net = VGG('VGG16').to(self.device)

        # All adversarially-trained variants share the ResNet18 backbone.
        elif self.model_arch in ('ResNet18', 'adv_ResNet18', 'adv_pgd_ResNet18', 'adv_fgsm_ResNet18'):
            self.net = ResNet18().to(self.device)

        elif self.model_arch == 'ResNet34':
            self.net = ResNet34().to(self.device)

        elif self.model_arch == 'VGG19':
            self.net = VGG('VGG19').to(self.device)

        elif self.model_arch == 'GoogLeNet':
            self.net = GoogLeNet().to(self.device)

        elif self.model_arch == 'LeNet':
            self.net = LeNet().to(self.device)

        elif self.model_arch == 'AlexNet':
            self.net = AlexNet().to(self.device)

        elif self.model_arch == 'NiN':
            self.net = NIN(10).to(self.device)

        elif self.model_arch == 'CNN':
            self.net = CNN().to(self.device)
            print(self.net)

        elif self.model_arch == 'ResNet50':
            self.net = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1).to(self.device)

        else:
            # BUG FIX: was a bare "raise ValueError" with no diagnostic.
            raise ValueError('Unknown model_arch: {}'.format(self.model_arch))

        ## Train or Load (the torchvision ResNet50 weights are already pretrained).
        if self.model_arch != 'ResNet50':
            path = os.path.join(self.cheackpoint_path, self.generate_name())
            if os.path.exists(path):
                self.Load_model(path)
            else:
                print('Not Found the CheckPoint, Now Train and Save the Model to Path <{}>'.format(path))

                if self.param['exp_type'] == 'robust':
                    ## adversarial training the model
                    self.Train_robust_model(path)
                else:
                    ## traditional training the model
                    self.Train_model(path)

                # BUG FIX: was "'…<{}>',format(path)" — the comma made print
                # emit the raw template and the unformatted path separately.
                print('Successfully Save the Model into Path <{}>'.format(path))
                print('Save Done')

        self.net.eval()