import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
import numpy as np
import pandas as pd
import timm
import os
from tqdm import tqdm

DEBUG = True  # when True, dump adversarial images / perturbations to ./debug_adv
img_height, img_width = 224, 224
# Valid pixel range of model inputs. The dataset scales images to [0, 1],
# so the clamp bounds must be 1.0 / 0.0 (the previous 255.0 upper bound made
# the upper clamp a no-op on normalised images).
img_max, img_min = 1., 0.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

class AttackDataset(Dataset):
    def __init__(self, input_dir=None, targeted=False):
        '''
        Dataset of images to attack, driven by a labels.csv manifest.
        Args:
            input_dir(str): root directory containing 'labels.csv' and an 'images' subdirectory.
            targeted(bool, optional): if True, also load the targeted label per image.
        '''
        self.input_dir = input_dir
        self.targeted = targeted
        self.f2l = self.load_labels(os.path.join(self.input_dir, 'labels.csv'))
        self.input_dir = os.path.join(self.input_dir, 'images')
        # Cache the filename order once so __getitem__ is O(1)
        # (previously list(self.f2l.keys()) was rebuilt on every access).
        self.filenames = list(self.f2l.keys())

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        filename = self.filenames[idx]
        filepath = os.path.join(self.input_dir, filename)

        if not os.path.exists(filepath):
            raise FileNotFoundError(f'{filepath} does not exist')

        # Convert to RGB first so resizing never operates on palette indices,
        # then resize to the model input size.
        image = Image.open(filepath).convert('RGB').resize((img_height, img_width))
        # Scale to [0, 1] (fixed: the divisor was 225, a typo for 255).
        image = (np.array(image) / 255.).astype(np.float32)
        image = torch.from_numpy(image).permute(2, 0, 1)  # HWC -> CHW
        label = self.f2l[filename]

        return image, label, filename

    def load_labels(self, csv_file):
        '''
        Loads the image filenames and their corresponding labels from the csv file.
        Args:
            csv_file(str): path to the csv file containing the image filenames and their corresponding labels.
        Returns:
            returns(dict): a dictionary mapping the image filenames to their corresponding labels
            (label only, or [label, targeted_label] when self.targeted is set).
        '''
        dev = pd.read_csv(csv_file)
        if self.targeted:
            return {row['filename']: [row['label'], row['targeted_label']]
                    for _, row in dev.iterrows()}
        return dict(zip(dev['filename'], dev['label']))
    
class EnsembleModel(nn.Module):
    '''
    Wrap several models into one averaged / weighted ensemble.
    Args:
        models(list): list of loaded models (each callable on a batch of images)
        weight(tensor, optional): per-model weight; None means plain averaging
    '''

    # Supported model names mapped to their local checkpoint files
    # (replaces the long if/elif chain that repeated the same call 12 times).
    _WEIGHT_FILES = {
        'vit_base_patch16_224': './vit_weight/vit_base_patch16_224.bin',
        'deit_tiny_patch16_224': './vit_weight/deit_tiny_patch16_224.bin',
        'deit_small_patch16_224': './vit_weight/deit_small_patch16_224.bin',
        'pit_ti_224': './vit_weight/pit_ti_224.bin',
        'pit_b_224': './vit_weight/pit_b_224.bin',
        'visformer_tiny': './vit_weight/visformer_tiny.bin',
        'visformer_small': './vit_weight/visformer_small.bin',
        'swin_tiny_patch4_window7_224': './vit_weight/swin_tiny_patch4_window7_224.bin',
        'swin_small_patch4_window7_224': './vit_weight/swin_small_patch4_window7_224.bin',
        'coat_tiny': './vit_weight/coat_tiny.bin',
        'cait_s24_224': './vit_weight/cait_s24_224.bin',
        'deit_base_distilled_patch16_224': './vit_weight/deit_base_distilled_patch16_224.bin',
    }

    def __init__(self, models, weight=None):
        super(EnsembleModel, self).__init__()
        self.models = models
        self.weight = weight

    def forward(self, x):
        # Collect all per-model logits, then stack ONCE after the loop
        # (fixed: the stack previously ran inside the loop, rebuilding the
        # (k, B, num_classes) tensor on every iteration).
        outputs = torch.stack([model(x) for model in self.models], dim=0)
        if self.weight is None:
            return torch.mean(outputs, dim=0)
        K = outputs.shape[0]
        return torch.sum(outputs * self.weight.view(K, 1, 1), dim=0)

    @staticmethod
    def _load_model(model_name):
        '''
        Load a model from the timm library using a local checkpoint file.
        Args:
            model_name(str): name of the model
        Returns:
            nn.Sequential: input normalisation followed by the model, moved to `device`
        Raises:
            ValueError: if the model name is not in the supported table
        '''
        if model_name not in EnsembleModel._WEIGHT_FILES:
            raise ValueError('Model {} not supported'.format(model_name))
        model = timm.create_model(
            model_name, pretrained=True,
            pretrained_cfg_overlay=dict(file=EnsembleModel._WEIGHT_FILES[model_name]))

        # Use the model's own normalisation statistics when available,
        # otherwise fall back to the standard ImageNet mean/std.
        if hasattr(model, 'default_cfg'):
            mean = model.default_cfg['mean']
            std = model.default_cfg['std']
        else:
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]

        normalise = transforms.Normalize(mean, std)
        return torch.nn.Sequential(normalise, model).to(device)
        
# Attack class (PRA-EA ensemble attack)
class Attack:
    def __init__(self, essenble_model, epsilon, alpha, gamma, targeted=False):
        '''
        Args:
            essenble_model(EnsembleModel): ensemble wrapper over the loaded models
            epsilon(float): maximum perturbation allowed
            alpha(float): step size for the FGSM step
            gamma(float): hyperparameter for the PRA-EA scaling map
            targeted(bool, optional): whether the attack is targeted
        '''
        self.essenble_model = essenble_model
        self.epsilon = epsilon
        self.alpha = alpha
        self.gamma = gamma
        self.targeted = targeted
        self.iter_cnt = 0  # only used to name the debug dumps

    def _get_KL_divergence(self, clean_logits, adv_logits):
        '''
        KL divergence between the clean and adversarial output distributions.

        F.kl_div expects its *input* as log-probabilities and its *target* as
        probabilities, so the adversarial logits must go through log_softmax
        (fixed: the previous code passed a plain softmax, which makes kl_div
        return a meaningless value).
        Args:
            clean_logits(tensor): logits for the clean input images
            adv_logits(tensor): logits for the adversarial images
        Returns:
            scalar tensor: batch-mean KL(clean || adv)
        '''
        log_prob_adv = F.log_softmax(adv_logits, dim=1)
        prob_clean = F.softmax(clean_logits, dim=1)
        return F.kl_div(log_prob_adv, prob_clean, reduction='batchmean')

    def _get_weight(self, clean_img, adv_img):
        '''
        Compute one weight per model from how strongly its prediction moves
        under the perturbation; models with a smaller KL divergence get a
        larger score before the softmax.
        Args:
            clean_img(tensor): clean input images
            adv_img(tensor): adversarial images
        Returns:
            tensor(K): softmax-normalised weight per model
        '''
        with torch.no_grad():  # weights are statistics, no gradient needed
            kls = torch.stack([self._get_KL_divergence(model(clean_img), model(adv_img))
                               for model in self.essenble_model.models])
        ck = kls.sum()
        sk = ck / kls  # inverse-KL score per model
        return F.softmax(sk, dim=0)

    def _get_grad(self, image, label):
        '''
        Input gradients, per individual model and for the (possibly weighted)
        ensemble. The input batch is tiled K times along the batch dimension
        so the per-model forward passes stay batched.
        Args:
            image(tensor: B,C,H,W): input images
            label(tensor: B): ground-truth labels
        Returns:
            (grad_per_models(K,B,C,H,W), grad_essenble(B,C,H,W))
        '''
        B, C, H, W = image.shape
        image = image.clone().detach().requires_grad_(True)
        K = len(self.essenble_model.models)

        # tile the batch K times along dim 0, one copy per model
        image_k = image.repeat(K, 1, 1, 1)
        label_k = label.repeat(K)

        # Per-model logits. F.cross_entropy applies log_softmax internally,
        # so raw logits are passed (fixed: the extra softmax before
        # cross_entropy squashed the loss and flattened the gradients).
        # create_graph/retain_graph are no longer requested: only first-order
        # gradients are used, so keeping the graphs alive just leaked memory.
        outputs = torch.cat([model(image_k[B * i:B * (i + 1)])
                             for i, model in enumerate(self.essenble_model.models)], dim=0)
        loss = F.cross_entropy(outputs, label_k)
        grad_all = torch.autograd.grad(loss, image_k)[0]
        grad_per_models = grad_all.view(K, B, C, H, W)

        # loss / gradient of the ensemble (uses the current ensemble weights)
        loss_essenble = F.cross_entropy(self.essenble_model(image), label)
        grad_essenble = torch.autograd.grad(loss_essenble, image)[0]
        return grad_per_models, grad_essenble

    def _get_cos_similarity_map(self, out_grad):
        '''
        Pixel-wise average cosine similarity between the per-model gradients.
        Args:
            out_grad(tensor: K,B,C,H,W): gradient of each model w.r.t. the input
        Returns:
            tensor(H,W): mean pairwise cosine similarity per pixel
        '''
        K, B, C, H, W = out_grad.shape
        # vectorise so the similarity is computed over the (B, C) dimensions
        flat = F.normalize(out_grad.view(K, B * C, H * W), dim=1)
        # all pairwise inner products -> (K, K, H*W) similarity matrix
        cos_map = torch.einsum('kch, lch -> klh', flat, flat)
        # average over the distinct model pairs (upper triangle, no diagonal)
        pairs = torch.triu_indices(K, K, offset=1)
        return cos_map[pairs[0], pairs[1]].mean(0).view(H, W)

    def _get_scale(self, out_grad, essenble_grad, gamma=0.1):
        '''
        Per-pixel scaling map derived from the gradient agreement between models.
        Args:
            out_grad(tensor: K,B,C,H,W): gradient of each model w.r.t. the input
            essenble_grad(tensor: B,C,H,W): ensemble gradient (currently unused here)
            gamma(float, optional): PRA-EA scaling hyperparameter
        Returns:
            tensor(1,1,H,W): scale per pixel, broadcastable over (B, C)
        '''
        cos_map = self._get_cos_similarity_map(out_grad)          # (H, W)
        M = torch.exp(gamma * cos_map) - cos_map * (cos_map - gamma)  # (H, W)
        return M.unsqueeze(0).unsqueeze(0)                        # (1, 1, H, W)

    def _get_adv_img(self, grad, alpha, epsilon):
        '''
        Single FGSM step: signed gradient scaled by alpha, clipped to epsilon.
        Args:
            grad(tensor): gradient w.r.t. the input (B,C,H,W)
            alpha(float): step size
            epsilon(float): maximum perturbation allowed
        Returns:
            tensor: the perturbation delta (NOT the adversarial image itself)
        '''
        # (fixed: a dead `delta = torch.zeros_like(...)` assignment was removed)
        return torch.clamp(alpha * grad.sign(), -epsilon, epsilon)

    def __call__(self, image, label):
        '''
        Run one PRA-EA round: probe with an unweighted FGSM step, derive the
        per-model weights from the probe, then attack with the re-weighted
        ensemble.
        Args:
            image(tensor: B,C,H,W): input images
            label(tensor: B): ground-truth labels
        Returns:
            (generated_adv, generated_adv_res, test_adv_res, test_res, test_adv)
        '''
        # clean-ensemble prediction (for reporting clean accuracy)
        test_out = self.essenble_model(image)
        # gradients with the (initially unweighted) ensemble
        grad_per_models, grad_essenble = self._get_grad(image, label)
        # probe adversarial example from the unweighted ensemble
        test_adv = torch.clamp(image + self._get_adv_img(grad_essenble, self.alpha, self.epsilon), img_min, img_max)
        # prediction on the probe
        test_adv_out = self.essenble_model(test_adv)
        # per-model weights from the probe, then re-weight the ensemble
        self.essenble_model.weight = self._get_weight(image, test_adv)
        # gradients again, now through the weighted ensemble
        grad_per_models, grad_essenble = self._get_grad(image, label)
        # final adversarial example
        generated_adv = torch.clamp(image + self._get_adv_img(grad_essenble, self.alpha, self.epsilon), img_min, img_max)
        generated_adv_out = self.essenble_model(generated_adv)
        # hard class decisions
        test_res = torch.argmax(test_out, dim=1)
        test_adv_res = torch.argmax(test_adv_out, dim=1)
        generated_adv_res = torch.argmax(generated_adv_out, dim=1)
        # optional debug dumps of the adversarial image and the perturbation
        if DEBUG:
            debug_dir = './debug_adv'
            os.makedirs(debug_dir, exist_ok=True)
            # save the (de-normalised) adversarial image
            save_image(test_adv[0]*0.5+0.5,  os.path.join(debug_dir, f'adv_{self.iter_cnt}.png'))
            # save the perturbation itself, rescaled to [0, 1] for viewing
            delta = generated_adv - image
            delta_vis = (delta - delta.min()) / (delta.max() - delta.min() + 1e-8)
            save_image(delta_vis[0], os.path.join(debug_dir, f'delta_{self.iter_cnt}.png'))

            self.iter_cnt += 1

        return generated_adv, generated_adv_res, test_adv_res, test_res, test_adv

# Attack construction helpers
# Load the pretrained models
def load_models(model_names):
    '''
    Load every requested pretrained model and switch it to eval mode.
    Args:
        model_names(list): list of model names (str)
    Returns:
        list: loaded models (nn.Sequential of normalisation + timm model)
    '''
    # nn.Module.eval() returns the module itself, so loading and switching
    # to eval mode compose directly inside the comprehension
    return [EnsembleModel._load_model(name).eval() for name in model_names]

# Build the AttackDataset object
def create_dataset(input_dir, targeted=False):
    '''
    Build the dataset of images to attack.
    Args:
        input_dir(str): image dataset path
        targeted(bool, optional): if the attack is targeted
    '''
    return AttackDataset(input_dir=input_dir, targeted=targeted)

def create_dataloader(dataset):
    '''Wrap the dataset in a single-image, in-order DataLoader.'''
    return DataLoader(dataset, batch_size=1, shuffle=False)

# Build the Attack object
def create_attack(ensemble_model, epsilon, alpha, attack_type, targeted=False, gamma=0.1):
    '''
    Build the Attack object.
    Args:
        ensemble_model(EnsembleModel): ensemble of models to attack
        epsilon(float): maximum perturbation allowed
        alpha(float): step size
        attack_type(str, optional): attack name (kept for interface
            compatibility; currently unused by Attack)
        targeted(bool, optional): whether the attack is targeted
        gamma(float, optional): PRA-EA scaling hyperparameter
    '''
    # fixed: the string `attack_type` was previously passed as Attack's
    # `gamma` argument; a proper float gamma is forwarded instead
    return Attack(ensemble_model, epsilon, alpha, gamma, targeted=targeted)

# Main entry point
def attack_main(input_dir = '/data/coding/Test/data', output_dir = '/data/coding/Test/adv_output', 
                model_names = ('vit_base_patch16_224', 'deit_tiny_patch16_224', 'cait_s24_224', 'coat_tiny', 'pit_ti_224')):
    '''
    Run the ensemble attack over the whole dataset and report statistics.
    Args:
        input_dir(str, optional): image dataset path
        output_dir(str, optional): adversarial image output path
        model_names(sequence[str], optional): model names for the ensemble
            (default is a tuple to avoid a mutable default argument)
    '''
    # load the models and wrap them into one ensemble
    nets = load_models(list(model_names))
    ensemble_model = EnsembleModel(nets).to(device)

    adv2_dir = '/data/coding/Test/adv2_output'  # probe-adversarial output path
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(adv2_dir, exist_ok=True)

    # build the dataset and dataloader
    dataset = create_dataset(input_dir, targeted=False)
    dataloader = create_dataloader(dataset)

    # attack hyperparameters
    epsilon = 0.1
    alpha = 0.01
    attack_type = 'FGSM'

    # run the attack
    attack = create_attack(ensemble_model, epsilon, alpha, attack_type)
    correct = 0
    success = 0
    processed = 0
    max_images = 1001  # cap on how many images to attack

    for img, label, filename in tqdm(dataloader, desc="Attack"):
        if processed >= max_images:
            break
        img, label = img.to(device), label.to(device)
        generated_adv, generated_adv_res, test_adv_res, test_res, test_adv = attack(img, label)
        processed += 1

        # per-image diagnostics
        print(test_res, label, test_adv_res, generated_adv_res)
        correct += (test_res == label).sum().item()
        success += (generated_adv_res != label).sum().item()

        # save the final and the probe adversarial images
        save_image(generated_adv, os.path.join(output_dir, 'adv_'+filename[0]))
        save_image(test_adv, os.path.join(adv2_dir, 'adv2_'+filename[0]))

    # fixed: the rates are computed over the images actually attacked, not
    # len(dataset) (the loop may stop early), and an empty dataset no longer
    # divides by zero
    if processed:
        acc_clean = correct / processed
        attack_success_rate = success / processed
        print(f'Clean accuracy: {acc_clean*100:.2f}%')
        print(f'Attack success rate: {attack_success_rate*100:.2f}%')
    else:
        print('No images were attacked.')

if __name__ == '__main__':
    attack_main()