import torch
import torchvision.models as models
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
from torchvision.utils import save_image
import torch.nn.functional as F
from PIL import Image
import numpy as np
import pandas as pd
import timm
import os
import math
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm

# Dataset / preprocessing globals.
# Model input resolution.
img_height, img_width = 224, 224
# BUG FIX: was (255., 0). AdvDataset scales pixels into [0, 1], so the valid
# image range used when clamping adversarial examples must be [0, 1];
# clamping to [0, 255] was effectively a no-op.
img_max, img_min = 1.0, 0.0
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

class AdvDataset(Dataset):
    """Dataset of (image, label, filename) triples for adversarial attacks.

    Labels always come from ``input_dir/labels.csv``.  Images come from
    ``input_dir/images`` in attack mode, or from ``output_dir`` in eval
    mode (evaluating previously generated adversarial examples).
    Pixels are scaled to [0, 1] float32, channels-first (C, H, W).
    """

    def __init__(self, input_dir=None, output_dir=None, targeted=False, eval=False):
        # NOTE: `eval` shadows the builtin but is kept for interface compatibility.
        self.targeted = targeted      # targeted vs. untargeted attack labels
        self.data_dir = input_dir
        # labels.csv always lives next to the original images
        self.f2l = self.load_labels(os.path.join(self.data_dir, 'labels.csv'))

        # BUG FIX: was `if False:`, which made eval mode unreachable and
        # silently ignored the `eval` flag.
        if eval:
            # load generated adversarial examples; labels from input_dir/labels.csv
            self.data_dir = output_dir
            print('=> Eval mode: evaluating on {}'.format(self.data_dir))
        else:
            # attack mode: load the original (clean) images
            self.data_dir = os.path.join(self.data_dir, 'images')
            print('=> Train mode: training on {}'.format(self.data_dir))
            print('Save images to {}'.format(output_dir))

    def __len__(self):
        return len(self.f2l)

    def __getitem__(self, idx):
        filename = list(self.f2l.keys())[idx]
        filepath = os.path.join(self.data_dir, filename)
        if not os.path.exists(filepath):
            raise ValueError('File {} not found'.format(filepath))

        image = Image.open(filepath).resize((img_height, img_width)).convert('RGB')
        # BUG FIX: was `/ 225` — divide by 255 so pixels land exactly in [0, 1]
        image = (np.array(image) / 255).astype(np.float32)
        image = torch.from_numpy(image).permute(2, 0, 1)  # HWC -> CHW
        label = self.f2l[filename]

        return image, label, filename

    def load_labels(self, file_name):
        """Read labels.csv into {filename: label}, or
        {filename: [label, targeted_label]} when targeted."""
        dev = pd.read_csv(file_name)
        if self.targeted:
            f2l = {dev.iloc[i]['filename']: [dev.iloc[i]['label'],
                                             dev.iloc[i]['targeted_label']] for i in range(len(dev))}
        else:
            f2l = {dev.iloc[i]['filename']: dev.iloc[i]['label']
                   for i in range(len(dev))}
        return f2l

# Ensemble of source models used for the attack.
# This module fuses the outputs of the individual models into one tensor.
class Model(torch.nn.Module):
    """Ensemble wrapper that fuses the logits of several models."""

    # timm model names supported by _load_model; each has a local weight
    # file at ./vit_weight/<name>.bin
    _SUPPORTED = (
        'vit_base_patch16_224',
        'deit_tiny_patch16_224',
        'deit_small_patch16_224',
        'pit_ti_224',
        'pit_b_224',
        'visformer_tiny',
        'visformer_small',
        'swin_tiny_patch4_window7_224',
        'swin_small_patch4_window7_224',
        'coat_tiny',
        'cait_s24_224',
        'deit_base_distilled_patch16_224',
    )

    def __init__(self, model_names, mode='mean', weight=None):
        '''
        model_names(list): list of model *instances* to be fused
        mode(str): how to fuse the outputs of the models, 'mean' or 'weighted'
        weight(Tensor): per-model weights for weighted fusion (length K)
        '''
        super(Model, self).__init__()
        # NOTE(review): a plain list does not register the models as
        # submodules, so .to(device)/.zero_grad() on this wrapper do not
        # reach them — kept as-is since callers move each model themselves.
        self.models = model_names
        self.mode = mode
        self.weight = weight

    def forward(self, x):
        # (K, B, num_classes) stack of every model's logits
        outputs = torch.stack([m(x) for m in self.models], dim=0)
        if self.weight is None:
            # plain average of the K logit tensors
            return torch.mean(outputs, dim=0)
        # weighted average; weight broadcasts over batch and class dims
        return torch.sum(outputs * self.weight.view(len(self.models), 1, 1), dim=0)

    @staticmethod
    def _load_model(model_name):
        """Create a pretrained timm model wrapped with its input
        normalization, moved to the global `device`.

        Raises ValueError for unsupported names.
        """
        if model_name not in Model._SUPPORTED:
            raise ValueError('Model {} not supported'.format(model_name))
        # Every branch of the original elif chain made exactly this call,
        # so the chain is replaced by a membership check + one call.
        model = timm.create_model(
            model_name, pretrained=True,
            pretrained_cfg_overlay=dict(file='./vit_weight/{}.bin'.format(model_name)))

        if hasattr(model, 'default_cfg'):
            mean = model.default_cfg['mean']
            std = model.default_cfg['std']
        else:
            # ImageNet defaults
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]

        normalise = transforms.Normalize(mean, std)
        return torch.nn.Sequential(normalise, model).to(device)
         
# Attack class
class Attack:
    """One-step ensemble adversarial attack.

    First takes a probe FGSM step with the plain ensemble gradient, then
    reweights the ensemble by each model's robustness (KL divergence under
    the probe perturbation) and takes a second step scaled by a per-pixel
    gradient-agreement map.
    """

    def __init__(self, essenble_model, epsilon, alpha, attack_type, gamma=0.5, targeted=False):
        self.essenble_model = essenble_model   # fused Model instance
        self.epsilon = epsilon                 # L-inf perturbation budget
        self.alpha = alpha                     # step size
        self.attack_type = attack_type
        self.targeted = targeted
        self.gamma = gamma                     # sharpness of the scaling map
        self.iter_cnt = 0                      # __call__ counter (debug filenames)

    def _get_KL_divergence(self, clean_out, adv_out):
        """KL(clean || adv) of the two softmax distributions (batchmean)."""
        # BUG FIX: F.kl_div expects its *input* in log-space; the original
        # passed plain softmax probabilities, giving a wrong divergence
        # (nonzero even for identical logits).
        log_prob_adv = F.log_softmax(adv_out, dim=1)
        prob_clean = F.softmax(clean_out, dim=1)
        return F.kl_div(log_prob_adv, prob_clean, reduction='batchmean')

    def _get_weight(self, clean_img, adv_img):
        """Per-model fusion weights: models whose output moved *less* under
        the perturbation (smaller KL) receive a larger weight."""
        kls = []
        with torch.no_grad():  # weights are used as constants; no grad needed
            for model in self.essenble_model.models:
                kls.append(self._get_KL_divergence(model(clean_img), model(adv_img)))
        # stack instead of torch.tensor(list-of-tensors): avoids the
        # deprecated copy-construction warning and keeps the device.
        kls = torch.stack(kls)
        sk = kls.sum() / kls          # inverse-KL score per model
        return F.softmax(sk, dim=0)

    def _get_grad(self, clean_img, label):
        """Return the per-model input gradients stacked as (K, B, C, H, W)
        and the ensemble's input gradient (B, C, H, W) of the CE loss."""
        clean_img = clean_img.clone().detach().requires_grad_(True)
        grad_models = []
        for model in self.essenble_model.models:
            # BUG FIX: F.cross_entropy already applies log_softmax; the
            # original fed it softmax-ed outputs (double softmax), which
            # flattens the loss surface and distorts the gradients.
            loss = F.cross_entropy(model(clean_img), label)
            # autograd.grad does not accumulate into .grad, so no zero_grad
            # is needed; create_graph dropped (no higher-order grads taken).
            grad_models.append(torch.autograd.grad(loss, clean_img)[0])
        grad = torch.stack(grad_models, dim=0)

        loss = F.cross_entropy(self.essenble_model(clean_img), label)
        grad_essenble = torch.autograd.grad(loss, clean_img)[0]
        return grad, grad_essenble

    def _get_cos_similarity_map(self, out_grad):
        """Average pairwise cosine similarity of the K per-model gradients
        at every spatial location; out_grad (K, B, C, H, W) -> (H, W)."""
        K = out_grad.shape[0]
        B, C, H, W = out_grad[0].shape
        # flatten batch+channels into the feature vector: (K, B*C, H*W)
        flat = out_grad.view(K, B * C, H * W)
        flat = F.normalize(flat, dim=1)                  # unit vectors
        # pairwise dot products of unit vectors -> (K, K, H*W)
        cos_mat = torch.einsum('kch, lch -> klh', flat, flat)
        # average over the strict upper triangle (each unordered pair once)
        iu = torch.triu_indices(K, K, offset=1)
        return cos_mat[iu[0], iu[1]].mean(0).view(H, W)

    def _get_scale(self, out_grad, grad_essenble, gamma):
        """Per-pixel step scaling from gradient agreement: agreement is
        amplified by the exp term, disagreement penalized by the quadratic."""
        cos_map = self._get_cos_similarity_map(out_grad)           # (H, W)
        M = torch.exp(gamma * cos_map) - cos_map * (cos_map - gamma)
        return M.unsqueeze(0).unsqueeze(0)   # (1,1,H,W); broadcasts over B, C

    def _get_adv_img(self, grad, alpha, epsilon):
        """One signed-gradient step, clipped to the epsilon ball.
        (Dead `delta = zeros_like(...)` initialization removed.)"""
        return torch.clamp(alpha * grad.sign(), -epsilon, epsilon)

    def __call__(self, img, label):
        # BUG FIX: iter_cnt was incremented both at the start and the end of
        # __call__, advancing by 2 per call; increment exactly once.
        self.iter_cnt += 1
        # clean ensemble prediction
        test_out = self.essenble_model(img)
        # pass 1: plain ensemble gradient -> probe adversarial image
        grad, grad_essenble = self._get_grad(img, label)
        test_adv = torch.clamp(
            img + self._get_adv_img(grad_essenble, self.alpha, self.epsilon),
            img_min, img_max)
        test_adv_out = self.essenble_model(test_adv)
        # reweight the ensemble by each model's robustness to the probe
        weight = self._get_weight(img, test_adv)
        self.essenble_model.weight = weight
        # pass 2: weighted gradient + agreement scaling map
        grad, grad_essenble = self._get_grad(test_adv, label)
        M = self._get_scale(grad, grad_essenble, self.gamma)
        generated_adv = torch.clamp(
            img + self._get_adv_img(grad_essenble, self.alpha, self.epsilon) * M,
            img_min, img_max)
        generated_adv_out = self.essenble_model(generated_adv)
        # hard predictions for reporting
        test_res = torch.argmax(test_out, dim=1)
        test_adv_res = torch.argmax(test_adv_out, dim=1)
        generated_adv_res = torch.argmax(generated_adv_out, dim=1)

        # debug: immediately dump the probe adversarial image and its
        # perturbation for inspection
        debug_dir = './debug_adv'
        os.makedirs(debug_dir, exist_ok=True)
        # NOTE(review): the *0.5+0.5 remap assumes [-1,1] images, but the
        # inputs here are in [0,1] — kept for parity, confirm intent.
        save_image(test_adv[0]*0.5+0.5,  os.path.join(debug_dir, f'adv_{self.iter_cnt}.png'))
        delta = test_adv - img
        # normalize the perturbation to [0,1] so it is visible
        delta_vis = (delta - delta.min()) / (delta.max() - delta.min() + 1e-8)
        save_image(delta_vis[0], os.path.join(debug_dir, f'delta_{self.iter_cnt}.png'))

        return generated_adv, generated_adv_res, test_adv_res, test_res, test_adv
    
# Test/main.py

# Load the pretrained source models
def load_models(model_names):
    """Instantiate every named source model and switch it to eval mode."""
    # nn.Module.eval() returns the module itself, so the comprehension
    # collects the models already in inference mode.
    return [Model._load_model(name).eval() for name in model_names]

# Create an AdvDataset object
def create_dataset(input_dir, output_dir, targeted=False, eval=False):
    """Build an AdvDataset over the given input/output directories."""
    return AdvDataset(input_dir=input_dir, output_dir=output_dir,
                      targeted=targeted, eval=eval)

# Create a DataLoader object
# def skip_missing_collate(batch):
#     """过滤掉 label=-1 的缺失样本"""
#     batch = [b for b in batch if b[1] != -1]
#     if len(batch) == 0:
#         # 本轮全缺失，返回空张量
#         return torch.empty(0, 3, img_height, img_width), \
#                torch.empty(0, dtype=torch.long), []
#     return default_collate(batch)
def create_dataloader(dataset):
    """Wrap the dataset in a sequential, batch-size-1 DataLoader."""
    return DataLoader(dataset, batch_size=1, shuffle=False)

# Create an Attack object
def create_attack(ensemble_model, epsilon, alpha, attack_type, targeted=False):
    """Build an Attack configured for the given ensemble and budget."""
    return Attack(ensemble_model, epsilon, alpha, attack_type,
                  targeted=targeted)

# Test / driver code
def main():
    """Run the ensemble attack over the dataset, save the adversarial
    images, and report clean accuracy and attack success rate."""
    # models to ensemble
    model_names = ['vit_base_patch16_224', 'deit_tiny_patch16_224', 'cait_s24_224', 'coat_tiny', 'pit_ti_224']

    models = load_models(model_names)

    # build the ensemble (fusion) model on the target device
    ensemble_model = Model(models, mode='mean').to(device)

    # dataset paths (adjust to your environment)
    input_dir = '/data/coding/Test/data'
    output_dir = '/data/coding/Test/adv_output'
    adv2_dir = '/data/coding/Test/adv2_output'
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(adv2_dir, exist_ok=True)   # same idiom for both output dirs

    # BUG FIX: was eval=True — this loop *generates* adversarial examples
    # from the clean inputs, so the dataset must load input_dir/images
    # (eval=True would read the initially empty output_dir).
    dataset = create_dataset(input_dir, output_dir, targeted=False, eval=False)
    dataloader = create_dataloader(dataset)

    # attack hyper-parameters
    epsilon = 0.1
    alpha = 0.01
    attack_type = 'FGSM'  # change the attack type here if needed

    attack = create_attack(ensemble_model, epsilon, alpha, attack_type, targeted=False)

    correct = 0   # clean samples classified correctly
    success = 0   # adversarial samples whose predicted label changed
    total = 0     # samples actually scored (the loop may stop early)

    iter_cnt = 0
    for img, label, filename in tqdm(dataloader, desc="Attack"):
        img, label = img.to(device), label.to(device)
        generated_adv, generated_adv_res, test_adv_res, test_res, test_adv = attack(img, label)
        iter_cnt += 1
        if iter_cnt > 1001:
            break

        print(test_res, label, test_adv_res, generated_adv_res)
        correct += (test_res == label).sum().item()
        success += (generated_adv_res != label).sum().item()
        total += label.size(0)

        # save the final adversarial image and the first-pass probe image
        save_image(generated_adv, os.path.join(output_dir, 'adv_' + filename[0]))
        save_image(test_adv, os.path.join(adv2_dir, 'adv2_' + filename[0]))

    # BUG FIX: rates were divided by the full dataset size even when the
    # loop stopped early; divide by the number of samples actually scored.
    if total:
        print(f'Clean accuracy: {correct / total * 100:.2f}%')
        print(f'Attack success rate: {success / total * 100:.2f}%')
    else:
        print('No samples processed.')

if __name__ == '__main__':
    main()