import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


# Fix the random seeds so training runs are reproducible
torch.manual_seed(0)
np.random.seed(0)


# CSV loading helper
def load_data_from_csv(csv_path):
    """Read positions and RSS readings from a CSV file.

    The file is expected to contain the columns: x, y, RSS1, RSS2, RSS3.

    Returns:
        (positions, rss): float32 tensors of shape (N, 2) and (N, 3).
    """
    frame = pd.read_csv(csv_path)
    positions = torch.tensor(frame[['x', 'y']].values, dtype=torch.float32)
    rss = torch.tensor(frame[['RSS1', 'RSS2', 'RSS3']].values, dtype=torch.float32)
    return positions, rss


# Generator network: position (x, y) -> three synthetic RSS values
class Generator(nn.Module):
    """MLP mapping a 2-D position to three RSS values in [-1, 1]."""

    def __init__(self):
        super().__init__()
        hidden = 128
        # Final Tanh bounds the output to [-1, 1]; callers rescale
        # to the actual RSS range afterwards.
        self.fc = nn.Sequential(
            nn.Linear(2, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 3),
            nn.Tanh(),
        )

    def forward(self, x):
        """Return the (batch, 3) generated RSS tensor for positions x."""
        return self.fc(x)


# Critic network: scores concatenated (position, RSS) vectors
class Discriminator(nn.Module):
    """WGAN critic over 5-D inputs (x, y, RSS1, RSS2, RSS3)."""

    def __init__(self):
        super().__init__()
        width = 128
        # No final activation: the critic emits an unbounded scalar score.
        self.fc = nn.Sequential(
            nn.Linear(5, width),
            nn.ReLU(),
            nn.Linear(width, width),
            nn.ReLU(),
            nn.Linear(width, 1),
        )

    def forward(self, x):
        """Return the (batch, 1) critic score for input x."""
        return self.fc(x)


# Gradient penalty (WGAN-GP regularizer)
def gradient_penalty(critic, real_data, fake_data, device):
    """Compute the WGAN-GP gradient penalty.

    Interpolates uniformly between real and fake samples, then penalizes
    the squared deviation of the critic's gradient norm from 1 — the
    1-Lipschitz constraint of the Wasserstein critic.

    Args:
        critic: module mapping (batch, features) -> (batch, 1) scores.
        real_data: real samples, shape (batch, features).
        fake_data: generated samples, same shape as real_data.
        device: torch device used to draw the interpolation coefficients.

    Returns:
        Scalar tensor: mean of (||grad||_2 - 1)^2 over the batch.
    """
    batch_size = real_data.size(0)

    # One interpolation coefficient per sample, broadcast over features.
    epsilon = torch.rand(batch_size, 1, device=device).expand_as(real_data)
    interpolated = epsilon * real_data + (1 - epsilon) * fake_data
    interpolated = interpolated.requires_grad_(True)

    # Critic score at the interpolated points.
    interpolated_scores = critic(interpolated)

    # d(score)/d(interpolated); create_graph=True so the penalty itself is
    # differentiable w.r.t. the critic parameters during backprop.
    gradients = torch.autograd.grad(
        outputs=interpolated_scores,
        inputs=interpolated,
        grad_outputs=torch.ones_like(interpolated_scores),
        create_graph=True,
        retain_graph=True,
        only_inputs=True
    )[0]

    # Per-sample L2 norm of the gradient.
    gradients = gradients.view(batch_size, -1)
    gradient_norm = gradients.norm(2, dim=1)

    # Penalize any deviation from unit gradient norm.
    penalty = ((gradient_norm - 1) ** 2).mean()
    return penalty


def train_wgan_gp(csv_file_path='./2.csv', num_epochs=10000, batch_size=64,
                  lr=0.0002, lambda_gp=10, critic_iters=5):
    """Train a conditional WGAN-GP mapping (x, y) positions to RSS triples.

    Args:
        csv_file_path: path to a CSV with columns x, y, RSS1, RSS2, RSS3.
        num_epochs: number of training iterations.
        batch_size: samples drawn per iteration.
        lr: Adam learning rate for both networks.
        lambda_gp: weight of the gradient-penalty term.
        critic_iters: critic updates per generator update.

    Side effects:
        Saves trained weights to 'generator-2.pth' and 'discriminator-2.pth'.
    """
    # Generator and critic instances
    generator = Generator()
    discriminator = Discriminator()

    # Optimizers (Adam betas as recommended for WGAN-GP)
    optimizer_g = optim.Adam(generator.parameters(), lr=lr, betas=(0.5, 0.999))
    optimizer_d = optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5, 0.999))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    generator = generator.to(device)
    discriminator = discriminator.to(device)

    # Load the CSV ONCE (the original re-read the file every epoch) and
    # normalize RSS from [min_rss, max_rss] into [-1, 1] to match the
    # generator's Tanh output range.
    real_data, real_rss = load_data_from_csv(csv_file_path)
    min_rss = -80
    max_rss = -20
    normalized_rss = (real_rss - min_rss) / (max_rss - min_rss) * 2 - 1

    for epoch in range(num_epochs):
        # Sample a random batch of positions and their normalized RSS
        indices = torch.randint(0, real_data.size(0), (batch_size,))
        real_data_batch = real_data[indices].to(device)
        real_rss_batch = normalized_rss[indices].to(device)

        # Critic input: position concatenated with RSS values
        real_batch = torch.cat((real_data_batch, real_rss_batch), dim=1)

        # --- Critic training (several updates per generator step) ---
        for _ in range(critic_iters):
            optimizer_d.zero_grad()

            # Generate fake RSS values conditioned on the real positions
            fake_rss_batch = generator(real_data_batch)
            fake_batch = torch.cat((real_data_batch, fake_rss_batch), dim=1)

            real_score = discriminator(real_batch)
            fake_score = discriminator(fake_batch.detach())

            # Negative of the Wasserstein distance estimate
            d_loss = fake_score.mean() - real_score.mean()

            # Detach the fake batch: the penalty constrains only the critic,
            # and the original needlessly built the create_graph backward
            # graph through the generator here.
            gp = gradient_penalty(discriminator, real_batch,
                                  fake_batch.detach(), device)

            d_total_loss = d_loss + lambda_gp * gp
            d_total_loss.backward()
            optimizer_d.step()

        # --- Generator training: maximize the critic score on fakes ---
        optimizer_g.zero_grad()
        fake_rss_batch = generator(real_data_batch)
        fake_batch = torch.cat((real_data_batch, fake_rss_batch), dim=1)
        g_loss = -discriminator(fake_batch).mean()
        g_loss.backward()
        optimizer_g.step()

        if epoch % 1000 == 0:
            print(f"Epoch [{epoch}/{num_epochs}], D Loss: {d_total_loss.item()}, G Loss: {g_loss.item()}")

    torch.save(generator.state_dict(), 'generator-2.pth')
    torch.save(discriminator.state_dict(), 'discriminator-2.pth')


# Entry point: run the full training procedure when executed as a script
if __name__ == "__main__":
    train_wgan_gp()
