import numpy as np
import torch
import torch.nn.functional as F

from defense.base_defense import BaseDefense

class Defense(BaseDefense):
    """Thermometer Encoding (TE) defense trained with LS-PGA adversarial examples.

    Pixels are discretized into ``config['level']`` thermometer-encoded
    channels and the model is retrained on batches that mix natural and
    adversarial inputs.  NOTE(review): the structure matches the
    thermometer-encoding defense of Buckman et al. (ICLR 2018) — confirm
    against the paper if exact reproduction matters.
    """

    def __init__(self, model_name, config):
        # retrain=True: the base class is asked to rebuild/retrain the model,
        # since its input layer must accept thermometer-encoded channels.
        super().__init__('TE', model_name, config, retrain = True)

    def get_model_options(self):
        # The model factory needs the discretization level: after
        # transform_input the channel count becomes C * level.
        return { 'thermometer_level': self.config['level'] }

    def one_hot_encoding(self, samples: torch.Tensor) -> torch.Tensor:
        """Discretize pixel values into ``level`` one-hot buckets.

        ``samples`` is a float image batch in [0, 1].  NCHW input (C in
        {1, 3}) is converted to NHWC first.  Returns a float tensor of shape
        (B, H, W, C, level) on the same device as ``samples``.
        """
        level = self.config['level']
        device = samples.device
        # Heuristic channel-first detection: treat dim 1 of size 1 or 3 as the
        # channel axis and move it last.
        # NOTE(review): an already-NHWC input whose height is 1 or 3 would be
        # mis-permuted here — confirm callers never pass such shapes.
        if len(samples.shape) >= 4 and (samples.shape[1] == 1 or samples.shape[1] == 3):
            samples = samples.permute(0, 2, 3, 1)

        # Bucket index per pixel; the 0.99999 factor keeps a pixel of exactly
        # 1.0 in bucket level-1 instead of the out-of-range index `level`.
        # The unsqueeze at dim=4 assumes a 4-D (NHWC) input at this point.
        discretized_samples = torch.unsqueeze(input=(0.99999 * samples * level).long().to(device), dim=4)
        # make the last dim be the level number
        shape = discretized_samples.shape
        # convert to one_hot encoding (scatter_ writes a 1 at each bucket index)
        one_hot_samples = torch.zeros([shape[0], shape[1], shape[2], shape[3], level]).to(device).scatter_(-1, discretized_samples, 1)
        one_hot_samples = one_hot_samples.float()

        return one_hot_samples

    def transform_input(self, samples: torch.Tensor) -> torch.Tensor:
        """Thermometer-encode an image batch.

        Returns a torch tensor of shape (B, C * level, H, W): the cumulative
        sum of the one-hot encoding along the bucket axis, with the expanded
        channel axis moved back to position 1.
        """
        level = self.config['level']
        # Convert NCHW to NHWC; after this permute, one_hot_encoding's own
        # channel-first check is normally a no-op (dim 1 is then the height).
        if len(samples.shape) >= 4 and (samples.shape[1] == 1 or samples.shape[1] == 3):
            samples = samples.permute(0, 2, 3, 1)

        # convert one hot encoding to thermometer encoding via cumulative sum
        one_hot_samples = self.one_hot_encoding(samples)
        therm_samples = torch.cumsum(one_hot_samples, dim=-1)

        # Fold the (C, level) pair into one axis and restore channel-first
        # layout; the result is a torch tensor (not numpy) of shape
        # (B, C * level, H, W).
        shape = samples.shape
        therm_samples = torch.reshape(therm_samples, (shape[0], shape[1], shape[2], shape[3] * level))
        therm_samples = therm_samples.permute(0, 3, 1, 2)

        return therm_samples

    def LSPGA_attack(self, samples: torch.Tensor):
        """Generate thermometer-space adversarial examples with LS-PGA.

        ``samples`` is a natural NCHW batch (assumed on self.model.device —
        TODO confirm against callers).  Returns a numpy array of shape
        (B, C * level, H, W) holding thermometer-encoded adversarials.
        """
        # STEP 1: sub-routine for getting an \epsilon-discretized masked of an image
        lowest = torch.clamp(samples - self.config['attack_eps'], 0.0, 1.0)
        highest = torch.clamp(samples + self.config['attack_eps'], 0.0, 1.0)

        # Mark every bucket reachable within the eps-ball by one-hot encoding
        # `level` interpolations between the lowest and highest images.
        masked_intervals = 0.0
        for alpha in np.linspace(0., 1., self.config['level']):
            single_one_hot = self.one_hot_encoding(alpha * lowest + (1. - alpha) * highest)
            masked_intervals += single_one_hot
        masked = (masked_intervals > 0.0).float().to(self.model.device)

        # masked has shape (B, H, W, C, level)
        shape = masked.shape

        # STEP 2: main function for generating adversarial examples using LS-PGA
        # init each of logits randomly with values sampled from a standard normal distribution.
        us_numpy = torch.randn(shape).numpy()

        # generating
        inv_temp = 1.0
        sigma_rate = 1.2
        self.model.eval()
        for i in range(self.config['steps']):
            # A fresh leaf tensor each step — every iteration builds its own
            # autograd graph from the accumulated numpy logits.
            us_logits = torch.from_numpy(us_numpy).to(self.model.device).float()
            us_logits.requires_grad = True
            # if not masked ( equal 0), turn it to be -inf (-99999)
            # then embedding the logits using softmax function with temperature to
            us_probs = F.softmax(inv_temp * (us_logits * masked - 999999.0 * (1. - masked)), dim=-1)

            # apply the cumulative sum function and reshape to get the distribution embedding
            thermometer_probs = torch.cumsum(us_probs, dim=-1)
            thermometer_probs = torch.reshape(thermometer_probs, (shape[0], shape[1], shape[2], shape[3] * self.config['level']))
            # convert the channel back to the second position
            thermometer_probs = thermometer_probs.permute(0, 3, 1, 2)

            logits = self.model(thermometer_probs)

            # NOTE(review): labels are the model's predictions on the first
            # iterate, not ground truth — presumably to avoid label leaking;
            # verify this is intended.
            if i == 0:
                ys = torch.argmax(logits, dim=1)

            # Gradient ASCENT on the loss (`+=` below) — an untargeted attack.
            loss = F.cross_entropy(logits, ys)
            gradients = torch.autograd.grad(loss, us_logits)[0]
            signed_gradient = torch.sign(gradients).cpu().numpy()

            us_numpy += self.config['attack_step_size'] * signed_gradient
            inv_temp *= sigma_rate  # anneal the temperature via exponential decay with rate sigma

        # Harden the relaxed logits: pick the best bucket per pixel among the
        # buckets the mask allows, then rebuild a discrete one-hot encoding.
        us_logits = torch.from_numpy(us_numpy).to(self.model.device).float()
        logits_results = us_logits * masked - 999999.0 * (1. - masked)
        logits_final = torch.argmax(logits_results, dim=-1, keepdim=True)

        one_hot_adv_samples = torch.zeros([shape[0], shape[1], shape[2], shape[3], self.config['level']]).to(self.model.device).scatter_(-1, logits_final, 1)
        one_hot_adv_samples = one_hot_adv_samples.float()

        # One-hot -> thermometer, fold (C, level), channel-first; returned as
        # a numpy array of shape (B, C * level, H, W).
        therm_adv_samples = torch.cumsum(one_hot_adv_samples, dim=-1)
        final_adv_samples = torch.reshape(therm_adv_samples, (shape[0], shape[1], shape[2], shape[3] * self.config['level']))
        final_adv_samples_numpy = final_adv_samples.permute(0, 3, 1, 2).cpu().numpy()

        return final_adv_samples_numpy

    def train(self, loader, opt, epoch):
        """Run one epoch of adversarial training on mixed natural/adversarial batches.

        NOTE(review): `timages.numpy()` below requires CPU tensors, so this
        assumes the loader yields batches on CPU — confirm.
        """
        for ind, (images, labels) in enumerate(loader):
            # Thermometer-encode the natural images (still on CPU here).
            timages = self.transform_input(images)
            images = images.to(self.model.device)
            labels = labels.to(self.model.device)

            # run the LSPGA attack (returns thermometer-encoded adversarials)
            np_advs = self.LSPGA_attack(images)

            # concatenate the nature samples and adversarial examples
            batch_images_numpy = np.concatenate((timages.numpy(), np_advs), axis=0)
            batch_images = torch.from_numpy(batch_images_numpy).to(self.model.device)
            # concatenate the true labels (same labels for both halves)
            batch_labels = torch.cat((labels, labels), dim=0)

            self.model.train()

            output = self.model(batch_images)
            loss = F.cross_entropy(output, batch_labels)

            opt.zero_grad()
            loss.backward()
            opt.step()

            # 1-based batch counter for the progress line
            ind += 1
            print('\rEpoch {}: [batch: {}/{} ({:.0f}%)] \tLoss: {:.4f}'.format(
                epoch, ind, len(loader),
                ind / len(loader) * 100, loss), end=' ')

        print()

    def defend(self):
        """Full defense loop: adversarially train and keep the best checkpoint.

        Returns the state dict of the epoch with the highest validation
        accuracy (None if accuracy never exceeds 0).
        """
        batch_size = self.model.training_params['batch_size']
        # load the training data (10% held out for validation)
        train_loader, valid_loader = self.model.get_train_valid_loader(batch_size = batch_size,
                                                                        valid_size = 0.1,
                                                                        transform_train = True,
                                                                        shuffle = True)
        # get the optimizer
        opt = self.model.get_optimizer()

        best_ratio = 0
        best_state_dict = None
        for epoch in range(self.model.training_params['num_epochs']):
            self.train(train_loader, opt, epoch)
            ratio = self.test(valid_loader)
            self.model.adjust_optimizer(opt, epoch)
            if ratio > best_ratio:
                best_ratio = ratio
                best_state_dict = self.model.state_dict_clone()
                print('Validate: Accuracy increased')
            else:
                print('Validate: Accuracy decreased')
            print()
        return best_state_dict
