import torch
import torch.nn as nn
import torch.nn.functional as F
from pkg_resources import safe_extra


class Generator(nn.Module):
    """MLP generator that produces bounded, label-masked perturbations G(x).

    The raw network output is multiplied elementwise by a label-specific
    feature mask (built by ``create_mask``) and then clipped so every
    perturbed feature stays within a relative budget of +/- ξ * |x|.
    """

    def __init__(self, input_dim, output_dim, feature_ranges):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        super(Generator, self).__init__()
        # LayerNorm (rather than BatchNorm) so single-sample inputs still work.
        self.model = nn.Sequential(
            nn.Linear(input_dim, 256),
            nn.LayerNorm(256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.2),
            nn.Linear(256, output_dim),
        )
        self.feature_ranges = feature_ranges  # per-feature value ranges (not used in this class's methods)
        self.mask = create_mask()  # dict: label tuple -> feature-mask tensor
        self.to(device)

    def forward(self, x, label, ξ: float = 0.1):
        """Return the masked, range-constrained perturbation G(x) for ``x``.

        ``label`` selects the feature mask; it may be a 1-D tensor, a batched
        tensor (only the first row is used as the key), or a plain sequence.
        """
        if x.dim() == 1:
            x = x.unsqueeze(0)
        raw = self.model(x)

        # Normalize the label into a hashable tuple key for the mask dict.
        if torch.is_tensor(label):
            # Batched input: the first sample's label stands in for the batch.
            vec = label if label.dim() == 1 else label[0]
            label_key = tuple(vec.cpu().float().tolist())
        else:
            label_key = tuple(label)

        masked = raw * self.mask[label_key]
        # Constrain each feature's perturbation to the relative budget ξ.
        return self.apply_constraints(x=x, perturbation=masked, ξ=ξ)

    def apply_constraints(self, x, perturbation, ξ=0.1):
        """Clamp ``perturbation`` elementwise into [-ξ*|x|, ξ*|x|]."""
        budget = torch.abs(x) * ξ
        if perturbation.dim() == 1:
            perturbation = perturbation.unsqueeze(0)
        # Elementwise clip against the per-feature budget tensor.
        return torch.max(torch.min(perturbation, budget), -budget)

    def generate_adv_samples(
        self,
        x_real,
        x_label,
        ξ=0.1,
    ):
        """Return adversarial samples x_real + G(x_real), rounding binary features."""
        delta: torch.Tensor = self.forward(x_real, x_label, ξ=ξ)
        x_adv = x_real + delta

        # Positions whose clean value is (numerically) 0 or 1 are treated as
        # binary features; isclose absorbs floating-point noise.
        zero = torch.tensor(0.0, device=x_real.device)
        one = torch.tensor(1.0, device=x_real.device)
        binary_positions = torch.isclose(x_real, zero) | torch.isclose(x_real, one)

        # Straight-through rounding: forward pass yields round(x_adv) at binary
        # positions while the backward pass keeps the gradient of x_adv.
        rounded_ste = torch.round(x_adv).detach() + (x_adv - x_adv.detach())
        return torch.where(binary_positions, rounded_ste, x_adv)




def update_generator(
    fake_samples,
    target_label,
    opt_g,
    discriminator,
    alpha=0.1,
    meta_model=None,
    generator=None,
    ξ=0.1,
):
    """Run one generator optimization step (GAN loss + adversarial loss).

    Args:
        fake_samples: generated adversarial candidates to score.
        target_label: labels used for the adversarial cross-entropy term.
        opt_g: optimizer over the generator's parameters.
        discriminator: scores samples; outputs are fed to BCE against ones.
        alpha: weight of the GAN loss relative to the adversarial loss.
        meta_model: ensemble model attacked via ``predict``; required.
        generator: generator whose gradients are inspected after backward.
        ξ: perturbation budget (unused inside this function).

    Returns:
        A dict of scalar diagnostics, or ``None`` when the loss is NaN or all
        generator gradients are zero (the optimizer step is skipped then).

    Raises:
        NotImplementedError: if ``meta_model`` is None.
    """
    # Move labels to the same device as the discriminator's parameters.
    device = next(discriminator.parameters()).device
    target_label = target_label.to(device)

    opt_g.zero_grad()

    # NOTE(review): requires_grad_ raises on non-leaf tensors in PyTorch;
    # this assumes fake_samples is a leaf (e.g. detached) — confirm at call site.
    fake_samples.requires_grad_(True)  # ensure gradients are tracked

    # GAN loss: push the discriminator toward classifying fakes as real.
    d_fake = discriminator(fake_samples)
    g_gan_loss = F.binary_cross_entropy(d_fake, torch.ones_like(d_fake))
    # safe_lables = torch.zeros_like(target_label)
    # safe_lables[:, -1] = 1
    # Adversarial loss: attack the ensemble (meta) model.
    if meta_model is not None:
        with torch.no_grad():
            # NOTE(review): no_grad detaches these logits, so adv_loss carries
            # no gradient back to the generator — only g_gan_loss drives the
            # update. Verify this is intentional (e.g. predict is non-differentiable).
            logits = meta_model.predict(fake_samples)
        adv_loss = F.cross_entropy(logits, target_label)

        # Combined objective (alpha balances the two terms).
        total_loss = adv_loss + alpha * g_gan_loss
    else:
        raise NotImplementedError("对抗损失需要集成模型")

    # Rescale vanishingly small losses so gradients stay numerically visible.
    if total_loss.item() < 1e-8:
        total_loss = total_loss * 1e4
    # Skip the whole step if the loss is NaN.
    if torch.isnan(total_loss):
        print("Warning: Loss is NaN!")
        return None

    total_loss.backward()

    # Sum per-parameter gradient norms as a training-health check.
    total_grad = 0
    for name, param in generator.named_parameters():
        if param.grad is not None:
            grad_norm = param.grad.norm().item()
            total_grad += grad_norm
    if total_grad == 0:
        print("Warning: Zero gradients detected!")
        return None

    opt_g.step()

    return {
        "total_loss": total_loss.item(),
        "total_grad": total_grad,
        "gan_loss": g_gan_loss.item(),
        "adv_loss": adv_loss.item(),
    }

def generate_masks_for_labels(data, device, g_input_dim):
    """Map each one-hot label (as a float tuple) to its feature-mask tensor.

    The first four classes share rows of ``data`` (classes 3 and 4 both use
    row 2); the fifth ("safe") class gets an all-ones mask so every feature
    may be perturbed.
    """
    # One-hot label key -> row index into `data`.
    row_for_label = {
        (1.0, 0.0, 0.0, 0.0, 0.0): 0,
        (0.0, 1.0, 0.0, 0.0, 0.0): 1,
        (0.0, 0.0, 1.0, 0.0, 0.0): 2,
        (0.0, 0.0, 0.0, 1.0, 0.0): 2,
    }
    label_masks = {
        key: data[row].unsqueeze(0) for key, row in row_for_label.items()
    }
    # Safe class: unrestricted mask of ones over all input features.
    label_masks[(0.0, 0.0, 0.0, 0.0, 1.0)] = torch.ones(
        g_input_dim, dtype=torch.float32
    ).to(device)

    return label_masks
def create_mask():
    """Build the label->feature-mask dictionary from a hard-coded binary table.

    Each tab-separated row below marks with 1s which input features the
    perturbation for a given class is allowed to modify; the column count
    determines the generator's input dimension.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Tab-separated 0/1 table: one row per mask, one column per feature.
    data_str = """
    0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	1	1	1	1	1	1	1	1	1	1	1	1	1	0	0	0	0	0	0	0	0	0	1	1	1	1	1	1	1	1	1	1
    0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	1	1	1	1	1	1	1	1	1	1	1	1	1	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0
    0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	0	1	1	1	1	1	1	1	1	1	1	1	1	1	1	1	1	1	1	1
    """
    # Parse every line into a row of ints (int() tolerates leading spaces).
    rows = [
        [int(tok) for tok in line.split('\t')]
        for line in data_str.strip().split('\n')
    ]

    # Feature count is taken from the first row.
    g_input_dim = len(rows[0])

    mask_tensor = torch.tensor(rows, dtype=torch.float32).to(device)

    return generate_masks_for_labels(mask_tensor, device, g_input_dim)

def apply_momentum_and_clip(grad, prev_grad, beta, u, ξ):
    """Momentum-average the gradient and derive a clipped perturbation.

    Args:
        grad: current gradient tensor.
        prev_grad: previous momentum buffer; ``None`` or a shape mismatch
            resets the buffer to the current gradient.
        beta: step size applied to the momentum term.
        u: momentum coefficient (weight on the previous buffer).
        ξ: absolute bound on each perturbation element.

    Returns:
        Tuple ``(perturbation, updated_momentum)``; the momentum tensor is
        detached from the autograd graph.
    """
    if prev_grad is None or prev_grad.size() != grad.size():
        # (Re)initialize the buffer; detach so the graph is cut here.
        momentum = grad.clone().detach()
    else:
        # Exponential moving average of the gradient, detached likewise.
        momentum = (u * prev_grad + (1 - u) * grad).detach()

    # Scale by the step size and keep every element inside [-ξ, ξ].
    step = torch.clamp(beta * momentum, -ξ, ξ)

    return step, momentum

# def update_generator(
#     x,
#     target_label,
#     opt_g,
#     discriminator,
#     alpha=0.1,
#     meta_model=None,
#     generator=None,
#     ξ=0.1,
# ):
#     # 确保数据在正确的设备上
#     device = next(discriminator.parameters()).device
#     opt_g.zero_grad()
#     fake_samples = generator(x, ξ=ξ)
#     target_label = target_label.to(device)

#     # GAN损失（欺骗判别器）
#     d_fake = discriminator(fake_samples)
#     g_gan_loss = F.binary_cross_entropy(d_fake, torch.ones_like(d_fake))
#     safe_label = torch.zeros_like(target_label)
#     safe_label[-1] = 1
#     # 对抗损失（攻击集成模型）
#     if meta_model is not None:
#         logits = meta_model.predict(fake_samples)
#         adv_loss = nn.CrossEntropyLoss()(logits, safe_label)
#         # adv_loss = nn.BCEWithLogitsLoss()(logits, safe_label)  # 各模型损失
#         # 使用KL散度和交叉熵的组合来增强对抗性学习

#         # probs = F.softmax(logits, dim=1)
#         # target_probs = torch.zeros_like(probs).scatter_(1, target_label.unsqueeze(1), 1)
#         # kl_div_loss = F.kl_div(probs.log(), target_probs, reduction="batchmean")
#         # adv_loss = adv_loss + kl_div_loss

#         # 总损失计算（α为平衡因子）
#         total_loss = adv_loss + alpha * g_gan_loss
#     else:
#         raise NotImplementedError("对抗损失需要集成模型")
#     # logits = meta_model.predict(fake_samples)  # 集成模型预测
#     # adv_loss = -F.cross_entropy(logits, target_label).to(device)

#     # # 总损失计算（α为平衡因子）
#     # total_loss = adv_loss + alpha * g_gan_loss

#     total_loss.backward()
#     total_grad = torch.norm(
#         torch.stack([torch.norm(p.grad) for p in generator.parameters()])
#     ).item()
#     opt_g.step()

#     return {
#         "total_loss": total_loss.item(),
#         "total_grad": total_grad,
#         "gan_loss": g_gan_loss.item(),
#         "adv_loss": adv_loss.item() if meta_model is not None else 0,
#     }
