import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Fix RNG seeds for torch and numpy so runs are reproducible.
torch.manual_seed(0)
np.random.seed(0)


def load_data_from_csv(csv_path):
    """Load fingerprint samples from a CSV file.

    The file is expected to have columns: x, y, RSS1, RSS2, RSS3.

    Args:
        csv_path: path to the CSV file.

    Returns:
        A tuple ``(positions, rss)`` of float32 tensors with shapes
        (N, 2) and (N, 3) respectively.
    """
    frame = pd.read_csv(csv_path)
    positions = torch.tensor(frame[['x', 'y']].values, dtype=torch.float32)
    rss = torch.tensor(frame[['RSS1', 'RSS2', 'RSS3']].values, dtype=torch.float32)
    return positions, rss


# Generator network: maps a 2-D position to three synthetic RSS values.
class Generator(nn.Module):
    """MLP that maps an (x, y) position to three RSS values in [-1, 1].

    The final Tanh bounds outputs to [-1, 1]; callers are expected to
    rescale to the real RSS range.
    """

    def __init__(self):
        super().__init__()
        hidden = 128
        # Plain (unnamed) Sequential keeps state_dict keys as fc.0, fc.2, fc.4,
        # matching checkpoints saved by the original implementation.
        layers = [
            nn.Linear(2, hidden),   # 2-D position in
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 3),   # 3 RSS values out
            nn.Tanh(),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        """Run the MLP on a batch of positions of shape (N, 2)."""
        return self.fc(x)


# Discriminator network: scores (position, RSS) pairs as real or generated.
class Discriminator(nn.Module):
    """MLP that maps a 5-D (position + 3 RSS) vector to a realness probability."""

    def __init__(self):
        super().__init__()
        # Unnamed Sequential keeps state_dict keys (fc.0, fc.2) compatible
        # with checkpoints saved by the original implementation.
        layers = [
            nn.Linear(5, 128),  # 2-D position concatenated with 3 RSS values
            nn.ReLU(),
            nn.Linear(128, 1),  # single logit ...
            nn.Sigmoid(),       # ... squashed to a probability in (0, 1)
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        """Score a batch of shape (N, 5); returns probabilities of shape (N, 1)."""
        return self.fc(x)


def svd_denoise(data, threshold=0.1):
    """Denoise a 2-D array by discarding small singular values.

    Computes a thin SVD of ``data``, zeroes every singular value below
    ``threshold`` (treated as noise), and reconstructs the array from the
    remaining components.

    Args:
        data: 2-D array-like to denoise.
        threshold: singular values strictly below this are dropped.

    Returns:
        The reconstructed (denoised) 2-D numpy array, same shape as ``data``.
    """
    u, s, vt = np.linalg.svd(data, full_matrices=False)
    # Keep only the significant singular values.
    s_kept = np.where(s < threshold, 0.0, s)
    # Broadcasting u * s_kept is equivalent to u @ diag(s_kept).
    return (u * s_kept) @ vt


def train_model(csv_file_path='./2.csv', num_epochs=10000, batch_size=64,
                min_rss=-80.0, max_rss=-20.0):
    """Train a conditional GAN that generates RSS values from (x, y) positions.

    Args:
        csv_file_path: path to the training CSV (columns x, y, RSS1, RSS2, RSS3).
        num_epochs: number of training epochs.
        batch_size: mini-batch size (sampled with replacement each epoch).
        min_rss, max_rss: assumed RSS range, used to normalize targets to
            [-1, 1] so they match the generator's Tanh output.

    Side effects:
        Prints losses every 1000 epochs and saves 'generator.pth' and
        'discriminator.pth' state dicts to the current directory.
    """
    generator = Generator()
    discriminator = Discriminator()

    # Adam with beta1=0.5 — the common choice for GAN training stability.
    optimizer_g = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))

    # Binary cross-entropy: discriminator outputs a realness probability.
    criterion = nn.BCELoss()

    # Load and normalize the dataset ONCE. The original implementation
    # re-read the CSV from disk on every epoch, which was pure overhead.
    real_data, real_rss = load_data_from_csv(csv_file_path)
    # Map RSS from [min_rss, max_rss] to [-1, 1] to match the generator's Tanh.
    normalized_rss = (real_rss - min_rss) / (max_rss - min_rss) * 2 - 1

    # Constant label tensors — hoisted out of the loop.
    real_labels = torch.ones(batch_size, 1)
    fake_labels = torch.zeros(batch_size, 1)

    for epoch in range(num_epochs):
        # Sample a random mini-batch (with replacement).
        indices = torch.randint(0, real_data.size(0), (batch_size,))
        real_data_batch = real_data[indices]
        real_rss_batch = normalized_rss[indices]

        # --- Discriminator step: separate real (position, RSS) pairs from fakes.
        optimizer_d.zero_grad()
        fake_rss_batch = generator(real_data_batch)
        real_output = discriminator(torch.cat((real_data_batch, real_rss_batch), dim=1))
        # detach() keeps the discriminator loss from backpropagating into the generator.
        fake_output = discriminator(torch.cat((real_data_batch, fake_rss_batch.detach()), dim=1))
        d_loss = criterion(real_output, real_labels) + criterion(fake_output, fake_labels)
        d_loss.backward()
        optimizer_d.step()

        # --- Generator steps (two per discriminator step): fool the discriminator.
        for _ in range(2):
            optimizer_g.zero_grad()
            fake_rss_batch = generator(real_data_batch)  # regenerate fresh fakes
            fake_output = discriminator(torch.cat((real_data_batch, fake_rss_batch), dim=1))
            # The generator wants the discriminator to label its output as real.
            g_loss = criterion(fake_output, real_labels)
            g_loss.backward()
            optimizer_g.step()

        if epoch % 1000 == 0:
            print(f'Epoch [{epoch}/{num_epochs}], D Loss: {d_loss.item()}, G Loss: {g_loss.item()}')

    # Persist the trained networks' state dicts.
    torch.save(generator.state_dict(), 'generator.pth')
    torch.save(discriminator.state_dict(), 'discriminator.pth')


# Script entry point: run the full GAN training loop.
if __name__ == "__main__":
    train_model()
