import os

import torch
from torch import nn, optim
from torch.utils.data import DataLoader

import models.discriminator as Dis
import models.generator as Gen
import models.meta as meta
from data_set import MyDataSet
from models.detector_cnnlstm import CNNLSTMModel
import escape_rate 
def genmain(train_path="./models/cnn_lstm_model.pth"):
    """Run the adversarial-sample training loop.

    Per epoch: update the meta (ensemble) model on real batches, generate
    adversarial samples with momentum-smoothed perturbations, and alternate
    discriminator/generator updates; then evaluate the escape rate, report
    perturbation statistics on a held-out test set, and save the generator.

    Args:
        train_path: Path to the pre-trained CNN-LSTM detector weights.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device: ", device)

    g_input_dim = 118  # generator input dimension
    g_output_dim = g_input_dim  # generator output dimension
    d_input_dim = g_output_dim  # discriminator input dimension
    d_output_dim = 1  # discriminator output dimension (kept for reference)

    m_output_dim = 5  # ensemble-model output dimension (kept for reference)
    feature_ranges = {
        i: (-float("inf"), float("inf")) for i in range(g_input_dim)
    }  # allowed value range per feature

    # Load the pre-trained CNN-LSTM model.
    # BUG FIX: map_location keeps a CUDA-saved checkpoint loadable on
    # CPU-only machines.
    trained_model = CNNLSTMModel().to(device)
    trained_model.load_state_dict(torch.load(train_path, map_location=device))
    trained_model.eval()  # evaluation mode
    # Duplicate the trained detector to form the ensemble; different models
    # could be substituted here instead.
    models = [trained_model] * 2
    models = [model.to(device) for model in models]

    # BUG FIX: the generator and discriminator must live on the same device
    # as the data batches, otherwise training crashes on CUDA machines.
    generator = Gen.Generator(
        input_dim=g_input_dim,
        output_dim=g_output_dim,
        feature_ranges=feature_ranges,
        # mask=torch.ones(g_input_dim).to(device),
    ).to(device)  # generator instance

    discriminator = Dis.Discriminator(input_dim=d_input_dim).to(device)  # discriminator instance
    meta_model = meta.Meta_model(models=models).to(device=device)  # meta-model instance

    # Hyper-parameters
    alpha = 0.1  # weight between adversarial loss and perturbation constraint
    β = 0.003  # generator learning rate
    ζ = 0.0002  # discriminator learning rate
    μ = 0.6  # momentum factor
    ε = 0.0002  # ensemble-model learning rate

    # Optimizers
    opt_g = optim.SGD(generator.parameters(), lr=β, momentum=μ)
    opt_d = optim.Adam(discriminator.parameters(), lr=ζ)
    opt_meta = optim.SGD(meta_model.parameters(), lr=ε)  # NOTE(review): currently unused
    ξ = 0.8  # perturbation-constraint threshold

    epochs = 10  # number of training epochs

    # Datasets — replace these paths with your actual data files.
    train_data = MyDataSet("./data/train_subset.csv")
    gene_data = MyDataSet("./data/ren_gan_subset.csv")

    batch_size = 128
    train_dataloader = DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
    )  # training data loader
    gen_dataloader = DataLoader(
        gene_data,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
    )  # generation data loader

    # Start each run with fresh, empty log files.
    log_files = [
        "./log/log_discriminator.txt",
        "./log/log_generator.txt",
        "./log/log_meta.txt",
        "./log/log_training.txt",
    ]
    os.makedirs("./log", exist_ok=True)
    for file in log_files:
        if os.path.exists(file):
            os.remove(file)
        open(file, "x").close()

    prev_perturb = None  # previous perturbation gradient (for momentum)

    for epoch in range(epochs):  # iterate over the whole dataset N times
        print("Epoch: {}/{}".format(epoch, epochs))
        for batch_idx, (real_datas, real_labels) in enumerate(train_dataloader):
            real_datas = real_datas.to(device)
            real_labels = real_labels.to(device)

            # --- Meta-learning phase (steps 2-4) ---
            m = meta.meta_batch_update(
                datas=real_datas,
                labels=real_labels,
                meta_model=meta_model,
                ξ=ξ,
                ε=ε,
            )
            with open(log_files[2], "a") as f:
                f.write(f"{m.items()}\n")

            # Generate adversarial samples (steps 5-6).
            fake_samples = generator.generate_adv_samples(real_datas, real_labels, ξ=ξ)

            # Apply momentum to the perturbation and clip it.
            perturbation, prev_perturb = Gen.apply_momentum_and_clip(
                grad=fake_samples - real_datas,  # perturbation gradient
                prev_grad=prev_perturb,  # gradient carried over from the previous batch
                beta=β,
                u=μ,
                ξ=ξ,
            )

            # Add the momentum-smoothed perturbation to the generator output.
            fake_samples = fake_samples + perturbation

            # Update the discriminator (step 11).
            d = Dis.update_discriminator(real_datas, fake_samples, opt_d, discriminator)
            with open(log_files[0], "a") as f:
                f.write(f"{d.items()}\n")

            # Update the generator (step 10).
            g = Gen.update_generator(
                fake_samples=fake_samples,
                target_label=real_labels,
                opt_g=opt_g,
                discriminator=discriminator,
                alpha=alpha,
                meta_model=meta_model,
                generator=generator,
            )
            with open(log_files[1], "a") as f:
                if g is not None:
                    f.write(f"{g.items()}\n")

            # BUG FIX: update_generator may return None (as the guard above
            # shows), so don't index into g unconditionally when writing the
            # training log.
            g_loss = g["total_loss"] if g is not None else float("nan")
            with open(log_files[3], "a") as f:
                f.write(f"Epoch {epoch+1}, Batch {batch_idx+1}, D Loss: {d['total_loss']:.4f}, "
                       f"G Loss: {g_loss:.4f}, Perturbation: {torch.mean(perturbation).item():.4f}\n")

        # Second pass over the generation dataset: GAN updates only (no
        # meta-model update, no momentum perturbation).
        for batch_idx, (r_datas, r_labels) in enumerate(gen_dataloader):
            r_datas = r_datas.to(device)
            r_labels = r_labels.to(device)
            fakes = generator.generate_adv_samples(r_datas, r_labels, ξ=ξ)
            d = Dis.update_discriminator(r_datas, fakes, opt_d, discriminator)
            with open(log_files[0], "a") as f:
                f.write(f"{d.items()}\n")
            g = Gen.update_generator(
                fake_samples=fakes,
                target_label=r_labels,
                opt_g=opt_g,
                discriminator=discriminator,
                alpha=alpha,
                meta_model=meta_model,
                generator=generator,
            )
            with open(log_files[1], "a") as f:
                if g is not None:
                    f.write(f"{g.items()}\n")

    # Generate adversarial samples and compute the overall escape rate.
    escape_rate_val = escape_rate.escape_rate(model=trained_model, test_dataloader=train_dataloader, generator_model=generator, ξ=ξ)
    print(f"Overall Escape Rate: {escape_rate_val:.4%}")

    # Evaluate the average perturbation ratio and the prediction shift on a
    # separate test set.
    test_data = MyDataSet("./data/1.csv")
    test_dataloader = DataLoader(
        test_data,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
    )
    test_avarage_perturbation_ratio = 0
    test_pre = 0
    batchs = 0
    for batch_idx, (real_datas, real_labels) in enumerate(test_dataloader):
        batchs += 1
        real_datas = real_datas.to(device)
        real_labels = real_labels.to(device)
        # Generate adversarial samples for this batch.
        fake_samples = generator.generate_adv_samples(real_datas, real_labels, ξ=ξ)
        # A relative perturbation ratio is only meaningful for non-zero
        # features; zero features contribute 0 instead of inf/nan.
        non_zero_mask = real_datas != 0
        delta = (fake_samples - real_datas).abs()
        perturbation_ratio = torch.where(
            non_zero_mask, delta / real_datas.abs(), torch.zeros_like(real_datas)
        )
        test_avarage_perturbation_ratio += perturbation_ratio.mean().item()
        r_pre = meta_model.predict(real_datas)
        pre = meta_model.predict(fake_samples)
        test_pre += (r_pre - pre).abs().mean(dim=0)
        print("delta:", pre - r_pre)
    print(
        "Average perturbation ratio: {:.4%}".format(
            test_avarage_perturbation_ratio / batchs
        )
    )
    # BUG FIX: the original printed this summary line twice.
    print("pre:", test_pre / batchs)

    # Save the trained generator.
    torch.save(generator.state_dict(), "./models/generator.pth")
    print("模型已保存")


if __name__ == "__main__":

    genmain()
    import matplotlib.pyplot as plt

    # Parse the training log into (epoch, d_loss, g_loss, perturbation)
    # records; each line has the shape
    # "Epoch E, Batch B, D Loss: x, G Loss: y, Perturbation: z".
    records = []
    with open("./log/log_training.txt", "r") as f:
        for raw_line in f:
            fields = raw_line.strip().split(", ")
            records.append(
                (
                    int(fields[0].split(" ")[1]),
                    float(fields[2].split(": ")[1]),
                    float(fields[3].split(": ")[1]),
                    float(fields[4].split(": ")[1]),
                )
            )
    epochs = [rec[0] for rec in records]
    d_losses = [rec[1] for rec in records]
    g_losses = [rec[2] for rec in records]
    perturbations = [rec[3] for rec in records]

    # Plot discriminator loss, generator loss, and mean perturbation.
    plt.figure(figsize=(10, 5))
    plt.plot(epochs, d_losses, label="D Loss", color="red")
    plt.plot(epochs, g_losses, label="G Loss", color="blue")
    plt.plot(epochs, perturbations, label="Perturbation", color="green")
    plt.xlabel("Epochs")
    plt.ylabel("Loss / Perturbation")
    plt.title("Training Progress")
    plt.legend()
    plt.show()

