from json import load
from random import sample, shuffle
import re
from unittest import TestLoader
import torch
import numpy as np
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
from models import *
import os
from utils import DemoTest
from torchvision.transforms.functional import rotate

## provide an API for networks, such as ResNet, VGG16, ALLCNN
## provide a dataloader (a class for evaluation, private class)
## encode and decode the DNA for the noise
## a Trainer class for training a network
## provide different evaluation metrics.


class Dataer:
    """
    Wrap a torchvision dataset (MNIST or CIFAR-10) and expose batch loaders,
    single samples, and the per-sample tensor shape.
    """

    def __init__(self, dataset_name='CIFAR-10'):
        """
        dataset_name (str) : 'MNIST' or 'CIFAR-10'.

        Raises:
            ValueError: for any other dataset name.
        """
        self.root_path = './data'
        self.dataset_name = dataset_name

        transformer = transforms.Compose([transforms.ToTensor(), ])
        if dataset_name == 'MNIST':
            self.dataset = [
                torchvision.datasets.MNIST(root='./data/mnist', train=True, transform=transformer, download=True),
                torchvision.datasets.MNIST(root='./data/mnist', train=False, transform=transformer, download=True)
            ]
        elif dataset_name == 'CIFAR-10':
            self.dataset = [
                torchvision.datasets.CIFAR10(root='./data/cifar-10-python', train=True, transform=transformer, download=True),
                torchvision.datasets.CIFAR10(root='./data/cifar-10-python', train=False, transform=transformer, download=True)
            ]
        else:
            raise ValueError("Unsupported dataset_name: {!r} (expected 'MNIST' or 'CIFAR-10')".format(dataset_name))

        # NOTE: attribute names keep the original spelling ('lenth') for backward compatibility.
        self.traindata_lenth = len(self.dataset[0])
        self.testdata_lenth = len(self.dataset[1])

    def get_loader(self, batch_size=128, isTrain=True, isShuffle=True):
        """Return a DataLoader over the train (isTrain=True) or test split."""
        split = 0 if isTrain else 1
        return DataLoader(self.dataset[split], batch_size=batch_size, num_workers=2, shuffle=isShuffle)

    def get_sample(self, id=0, isShuffle=False, isFrom_testData=True):
        """
        Fetch one (image, label) pair as a batch of size 1.

        By default this returns the 0-th image deterministically; with
        isShuffle=True a random sample is drawn instead. Returns (None, None)
        when `id` is out of range.

        NOTE(review): isTrain=isFrom_testData looks inverted — True selects the
        TRAIN split despite the parameter name; kept as-is to preserve behavior,
        confirm intent before changing.
        """
        if isShuffle:
            return next(iter(self.get_loader(batch_size=1, isTrain=isFrom_testData, isShuffle=True)))

        loader = self.get_loader(batch_size=1, isTrain=isFrom_testData, isShuffle=False)
        for index, (image, label) in enumerate(loader):
            if index == id:
                return image, label
        return None, None

    def get_shape(self, ):
        """Return the shape of one loaded batch of size 1 (batch dim included)."""
        loader = self.get_loader(batch_size=1)
        samples, targets = next(iter(loader))

        ## ignore the batch size
        return samples.shape
    
class Network:
    """
    Create a classifier (VGG16 or ResNet18) and either load it from a cached
    checkpoint or train it from scratch, saving the best-accuracy weights.
    """

    def __init__(self, dataer, param):
        """
        dataer (Dataer) : data provider used for train/test loaders.
        param (dict)    : must contain 'model_arch' and 'device'; when training
                          is needed, also 'epochs', 'lr', 'batch_size'.

        Raises:
            ValueError: if param['model_arch'] is not 'VGG16' or 'ResNet18'.
        """
        self.dataer = dataer

        self.root_path = './data'
        self.param = param
        self.model_arch = param['model_arch']
        self.device = param['device']
        # NOTE: attribute keeps its original (misspelled) name for backward compatibility.
        self.cheackpoint_path = os.path.join(self.root_path, 'checkpoint')
        if not os.path.exists(self.cheackpoint_path):
            os.makedirs(self.cheackpoint_path)

        if self.model_arch == 'VGG16':
            self.net = VGG('VGG16').to(self.device)
        elif self.model_arch == 'ResNet18':
            self.net = ResNet18().to(self.device)
        else:
            raise ValueError('Unsupported model_arch: {!r}'.format(self.model_arch))

        ## Train, or load the cached checkpoint when one exists.
        path = os.path.join(self.cheackpoint_path, self.generate_name())
        if os.path.exists(path):
            self.Load_model(path)
        else:
            print('Not Found the CheckPoint, Now Train and Save the Model to Path <{}>'.format(path))
            self.Train_model(path)
            # BUG FIX: was "'...<{}>',format(path)" (comma instead of dot), which
            # printed the literal placeholder plus str(path) as a second argument.
            print('Successfully Save the Model into Path <{}>'.format(path))
            print('Save Done')

    def Train_model(self, path):
        """
        Train self.net with SGD + cosine LR schedule; evaluate after every
        epoch and save to `path` whenever test accuracy improves.
        """
        epochs = self.param['epochs']
        lr = self.param['lr']
        batch_size = self.param['batch_size']
        criterion = torch.nn.CrossEntropyLoss()
        train_loader = self.dataer.get_loader(isTrain=True, batch_size=batch_size, isShuffle=True)
        optimizer = torch.optim.SGD(self.net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

        # BUG FIX: best_acc was reset to 0.0 inside the epoch loop, so every
        # epoch with non-zero accuracy overwrote the checkpoint. Track it
        # across epochs instead.
        best_acc = 0.0

        for epoch in range(1, epochs + 1):

            self.net.train()
            train_loss = 0.0
            correct = 0
            total = 0

            for batch_idx, (inputs, targets) in enumerate(train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                optimizer.zero_grad()
                outputs = self.net(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

            # BUG FIX: the progress line displayed the epoch out of 'epochs + 1'.
            print('Epoch : [{}/{}], Loss : {:.3f}, Train Acc : {:.2f}%'.format(epoch, epochs, train_loss / (batch_idx + 1), 100. * correct / total))

            scheduler.step()
            acc = self.Test_model()

            if acc > best_acc:
                print('Saving')
                self.Save_model(path)
                best_acc = acc

    def Save_model(self, path):
        """Persist the network weights (state_dict only) to `path`."""
        torch.save(self.net.state_dict(), path)

    def Load_model(self, path):
        """Restore network weights from `path` onto self.device."""
        print('Load Model from Path : <{}>'.format(path))
        # map_location lets CPU-only machines load checkpoints trained on GPU.
        self.net.load_state_dict(torch.load(path, map_location=self.device))
        print('Load Done')

    def Test_model(self, ):
        """Evaluate on the test split; print loss/accuracy and return accuracy in [0, 1]."""
        self.net.eval()
        test_loss = 0
        correct = 0
        total = 0
        criterion = torch.nn.CrossEntropyLoss()
        test_loader = self.dataer.get_loader(isTrain=False, isShuffle=False, batch_size=100)
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.net(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
            print('Loss : {:.3f}, Acc : {:.2f}%'.format(test_loss / (batch_idx + 1), 100. * correct / total))
        return correct / total

    def get_prediction_score(self, sample, label):
        """Return the raw network output and the argmax prediction for one batch."""
        self.net.eval()
        with torch.no_grad():
            sample = sample.to(self.device)
            label = label.to(self.device)
            output = self.net(sample)
            _, predicted = output.max(1)

        return output, predicted

    def generate_name(self, ):
        """Checkpoint filename: '<dataset>_<arch>.pth'."""
        return self.dataer.dataset_name + '_' + self.model_arch + '.pth'

class DemoEvaluation:
    """Thin evaluation wrapper around the ZDT1 demo problem from utils."""

    def __init__(self, param):
        # Keep the raw configuration and count fitness evaluations in self.fes.
        self.param = param
        self.fes = 0
        self.demoer = DemoTest('ZDT1')

    def eval(self, x):
        """Evaluate solution x on ZDT1, counting the call in self.fes."""
        self.fes = self.fes + 1
        return self.demoer.ZDT1(x)

class Evaluation:
    """
    Adversarial-attack evaluation: a candidate solution (the "DNA" vector x)
    is decoded into a pixel-space perturbation, added to one fixed experiment
    sample, and scored against the trained network.
    """

    def __init__(self, param):
        """
        param (dict) : experiment configuration; must contain 'dataset' and
                       'eps' plus everything Network requires ('model_arch',
                       'device', 'epochs', 'lr', 'batch_size').
        """
        self.param = param
        self.fes = 0  # count of fitness-function evaluations
        self.dataer = Dataer(self.param['dataset'])
        self.network = Network(dataer=self.dataer, param=self.param)

        print('Preparing the experiment sample')
        ## Fix the experiment sample once; get_sample's defaults select sample 0.
        self.sample, self.label = self.dataer.get_sample()
        print('Experiment sample load done')

    def convert(self, x):
        """
        Decode DNA vector x (a numpy array) into a noise tensor shaped like
        one input batch (batch dimension included).
        """
        shape = self.dataer.get_shape()
        return torch.from_numpy(x.reshape(shape))

    def _perturbed_sample(self, x):
        """
        Shared attack preamble: eps-scale x, decode it to noise, add it to the
        fixed sample, clamp to the valid pixel range [0, 1], and move the
        label to the network device.

        Returns (adv_sample, x_delta).
        """
        self.sample = self.sample.to(self.network.device)
        x_delta = self.convert(self.param['eps'] * x).to(device=self.network.device, dtype=torch.float)
        adv_sample = torch.clamp(self.sample + x_delta, min=0.0, max=1.0).detach().to(self.network.device)
        self.label = self.label.to(self.network.device)
        return adv_sample, x_delta

    def get_figure(self, x=None, isClean=False):
        """
        Return the clean experiment sample (isClean=True) or the adversarial
        image produced by DNA x.
        """
        self.sample = self.sample.to(self.network.device)
        if isClean:
            return self.sample

        x = self.param['eps'] * x
        x_delta = self.convert(x).to(device=self.network.device, dtype=torch.float)

        adv_sample = torch.clamp(self.sample + x_delta, min=0.0, max=1.0).detach()

        return adv_sample

    def testeval(self, x):
        """
        Single-objective diagnostic: return the network's softmax probability
        of the TRUE label on the adversarial image (lower == stronger attack),
        printing the predicted label alongside.
        """
        self.fes += 1
        adv_sample, _ = self._perturbed_sample(x)

        self.network.net.eval()
        # BUG FIX: the original ran the identical forward pass twice; no_grad
        # also avoids building an autograd graph that was never used.
        with torch.no_grad():
            adv_output = self.network.net(adv_sample)
        _, adv_pred = adv_output.max(1)
        adv_label = adv_pred[0]

        softmax_layer = torch.nn.Softmax(dim=1)
        probability_output = softmax_layer(adv_output).squeeze().detach().cpu().numpy()

        print('origin label : {}, max label : {}, probability : {}'.format(self.label[0], adv_label, probability_output[adv_label]))
        return probability_output[self.label[0]]

    def new_eval(self, x):
        """
        Rotation-robust bi-objective evaluation of DNA x:
          f1 = mean true-label probability over a fan of rotations,
          f2 = variance of that probability across the rotations.
        Returns np.array([f1, f2]).
        """
        self.fes += 1
        adv_sample, _ = self._perturbed_sample(x)

        rotations = [60., 45., 30., 15., 0., -15., -30., -45., -60.]
        self.network.net.eval()

        softmax_layer = torch.nn.Softmax(dim=1)  # hoisted out of the loop
        f1 = 0.0
        probability_list = []
        with torch.no_grad():
            for rotation in rotations:
                adv_image = rotate(adv_sample, angle=rotation)
                adv_output = self.network.net(adv_image)
                probability_output = softmax_layer(adv_output).squeeze().detach().cpu().numpy()
                f1 += probability_output[self.label[0]]
                probability_list.append(probability_output[self.label[0]])

        f1 /= len(rotations)

        # Population variance of the per-rotation probabilities around f1.
        f2 = 0.0
        for prop in probability_list:
            f2 += (prop - f1) ** 2
        f2 /= len(rotations)

        return np.array([f1, f2])

    def eval(self, x):
        """
        Main bi-objective evaluation of DNA x:
          1) x --> noise (eps-scaled, reshaped to the sample shape)
          2) adversarial image = clamp(sample + noise, 0, 1)
          3) f1 = L2 norm of the noise
          4) f2 = softmax probability of the true label on the adversarial image
        Returns np.array([f1, f2]); may be overridden for other objectives.
        """
        self.fes += 1
        adv_sample, x_delta = self._perturbed_sample(x)

        self.network.net.eval()
        # BUG FIX: the original also forwarded the clean sample, whose output
        # was never used — a wasted forward pass.
        with torch.no_grad():
            adv_output = self.network.net(adv_sample)
        softmax_layer = torch.nn.Softmax(dim=1)
        probability_output = softmax_layer(adv_output).squeeze().detach().cpu().numpy()

        f1 = torch.norm(x_delta).cpu().numpy()
        f2 = probability_output[self.label[0]]

        return np.array([f1, f2])

    def older_eval(self, x):
        """
        Legacy evaluation where x = [eps, delta...]:
          perturbation = x[0] * sign(delta) (FGSM-like),
          f1 = x[0] (perturbation magnitude),
          f2 = softmax probability of the true label on the adversarial image.
        Returns np.array([f1, f2]).
        """
        self.fes += 1
        self.sample = self.sample.to(self.network.device)
        x_delta = self.convert(x[1:]).to(device=self.network.device, dtype=torch.float)
        x_delta = x[0] * torch.sign(x_delta)

        adv_sample = torch.clamp(self.sample + x_delta, min=0.0, max=1.0).detach().to(self.network.device)
        self.label = self.label.to(self.network.device)

        self.network.net.eval()
        # BUG FIX: dropped the unused clean-sample forward pass (same as eval).
        with torch.no_grad():
            adv_output = self.network.net(adv_sample)
        softmax_layer = torch.nn.Softmax(dim=1)
        probability_output = softmax_layer(adv_output).squeeze().detach().cpu().numpy()

        f1 = x[0]
        f2 = probability_output[self.label[0]]

        return np.array([f1, f2])

    def get_decision_sapce_dim(self, ):
        """
        Number of decision variables = product of the sample shape (batch dim
        included). NOTE: method name keeps its original spelling for backward
        compatibility.
        """
        shape = self.dataer.get_shape()
        return np.prod(list(shape))

    def set_param(self, ):
        """Set different parameters for different experiments (not implemented)."""
        pass

    def set_samples(self, id=0, isShuffle=False, isFrom_testData=True):
        """Choose a different experiment sample (see Dataer.get_sample)."""
        self.sample, self.label = self.dataer.get_sample(id, isShuffle, isFrom_testData)


if __name__ == "__main__":

    # Smoke-test entry point: build (or load) a VGG16 on CIFAR-10 and
    # report its test accuracy.
    config = {
        'model_arch': 'VGG16',
        'device': 'cuda',
        'epochs': 200,
        'lr': 0.1,
        'batch_size': 128,
    }
    data_provider = Dataer()
    net_wrapper = Network(data_provider, config)
    net_wrapper.Test_model()
