import swan_data_prepare
import utils
from utils import *
import torch
import torch.optim as optim
import model
from tqdm.auto import tqdm

def count_invalid(data, sentinel_values=(-32768, 9999.0)):
    """
    Count NaN entries plus occurrences of known invalid sentinel values.

    Parameters:
    - data: torch.Tensor, input data tensor.
    - sentinel_values: iterable of numbers treated as invalid markers.
      Defaults to (-32768, 9999.0), preserving the original behavior
      (these are common missing-data fill values in ocean datasets).

    Returns:
    - int: total count of NaN entries and sentinel-valued entries.
    """
    total = torch.isnan(data).sum().item()
    # Each sentinel is counted with an exact elementwise comparison;
    # NaN never equals anything, so NaNs are not double-counted here.
    for value in sentinel_values:
        total += torch.sum(data == value).item()
    return int(total)

def train_model(context_encoder, encoder, decoder, discriminator, dataloader, device, context_encoder_save,
                decoder_save, latent_dim=128, lambda_kl=0.1, lambda_adv=0.01, lr=1e-4, epochs=100, verbose=True,
                ):
    """
    Train the OW-GAM model (CVAE-GAN style: context encoder + encoder/decoder
    generator plus an adversarial discriminator).

    Parameters:
    - context_encoder: module mapping buoy observations to a context vector.
    - encoder: module producing (z_mean, z_var) from a wave field and context vector.
    - decoder: module reconstructing a wave field from (z, context vector).
    - discriminator: adversarial discriminator over wave fields.
    - dataloader: yields (buoy_data, wave_field) batches.
    - device: torch.device, compute device (CPU/GPU).
    - context_encoder_save: path where the best context-encoder weights are saved.
    - decoder_save: path where the best decoder weights are saved.
    - latent_dim: int, latent vector dimension. NOTE(review): not used in this
      function body — the encoder architecture fixes the latent size; kept for
      backward-compatible API.
    - lambda_kl: float, weight of the KL loss.
    - lambda_adv: float, weight of the adversarial loss.
    - lr: float, learning rate for both Adam optimizers.
    - epochs: int, number of training epochs.
    - verbose: bool, whether to print a per-batch training log.

    Returns: the trained (context_encoder, encoder, decoder, discriminator).
    """
    # Best generator loss seen so far. Kept as a plain float (not the loss
    # tensor) so the comparison below does not retain the autograd graph.
    best_loss_G = 100.0

    # Loss helper: provides mse_loss / kl_loss / bce_loss and the
    # reparameterization trick.
    loss = Loss()

    # Optimizers: generator side (context encoder + encoder + decoder)
    # versus discriminator.
    generator_params = list(context_encoder.parameters()) + list(encoder.parameters()) + \
                       list(decoder.parameters())

    discriminator_params = discriminator.parameters()

    optimizer_G = optim.Adam(generator_params, lr=lr)
    optimizer_D = optim.Adam(discriminator_params, lr=lr)

    # Move all sub-models to the target device.
    context_encoder.to(device)
    encoder.to(device)
    decoder.to(device)
    discriminator.to(device)

    for epoch in range(epochs):

        context_encoder.train()
        encoder.train()
        decoder.train()
        discriminator.train()

        for batch_idx, (buoy_data, wave_field) in tqdm(enumerate(dataloader), total=len(dataloader)):
            print(f"Batch {batch_idx + 1}:", f"buoy_data_batch_shape: {buoy_data.shape}",
                  f"wave_field_batch shape: {wave_field.shape}")

            # Move data to the device.
            # Assumed shapes (per original comments — confirm against dataset):
            #   buoy data  (batch_size, 5, 3, 4)
            #   wave field (batch_size, 3, 128, 128)
            buoy_data = buoy_data.to(device)
            wave_field = wave_field.to(device)

            # Context vector C derived from the buoy observations.
            context_vector = context_encoder(buoy_data)

            # Encoder yields mean/variance of latent z: (batch_size, latent_dim).
            z_mean, z_var = encoder(wave_field, context_vector)

            # Sample z via the reparameterization trick.
            z = loss.reparameterize(z_mean, z_var)

            # Decoder reconstructs the predicted wave field.
            reconstructed_wave_field = decoder(z, context_vector)

            # Reconstruction loss.
            l_rec = loss.mse_loss(wave_field, reconstructed_wave_field)

            # KL divergence loss.
            l_KL = loss.kl_loss(z_mean, z_var)

            # Positions that are 0 in the original field become -1 after
            # normalization; force the same positions to -1 in the
            # reconstruction before the adversarial pass (land/invalid mask).
            mask = (wave_field == -1)
            masked_reconstruction = reconstructed_wave_field.clone()
            masked_reconstruction[mask] = -1
            reconstructed_wave_field = masked_reconstruction

            # Generator adversarial loss: try to make the discriminator
            # label the reconstruction as real.
            fake_preds = discriminator(reconstructed_wave_field)
            l_adv_G = loss.bce_loss(fake_preds, torch.ones_like(fake_preds))

            # Total generator loss.
            l_G = l_rec + lambda_kl * l_KL + lambda_adv * l_adv_G

            # Generator optimization step.
            optimizer_G.zero_grad()
            l_G.backward()
            optimizer_G.step()

            # Discriminator loss on real fields vs detached reconstructions
            # (detach so the generator receives no gradient from this step).
            real_preds = discriminator(wave_field)
            fake_preds = discriminator(reconstructed_wave_field.detach())

            l_adv_D = 0.5 * (loss.bce_loss(real_preds, torch.ones_like(real_preds)) +
                             loss.bce_loss(fake_preds, torch.zeros_like(fake_preds)))

            # Discriminator optimization step.
            optimizer_D.zero_grad()
            l_adv_D.backward()
            optimizer_D.step()

            if verbose:
                print(f"Epoch [{epoch +1}/{epochs}] Batch[{batch_idx + 1}/{len(dataloader)}]:"
                      f"Loss_G: {l_G.item():.4f}, Loss_D: {l_adv_D.item():.4f},"
                      f"L_rec: {l_rec.item():.4f}, KL: {l_KL.item():.4f}")

        # Checkpoint when the generator loss of the epoch's last batch improves.
        # Compare/store a detached float: the original kept the loss *tensor*,
        # which retains the whole autograd graph across epochs (memory leak)
        # and prints a tensor repr instead of a number.
        last_loss_G = l_G.item()
        if last_loss_G < best_loss_G:
            best_loss_G = last_loss_G
            print(f"模型loss降低！最新loss值为：{best_loss_G},模型已保存！")
            torch.save(context_encoder.state_dict(), context_encoder_save)
            torch.save(decoder.state_dict(), decoder_save)

    return context_encoder, encoder, decoder, discriminator

# ---- Script entry: load data, build models, and launch training. ----

# Select the compute device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("loading swan data and buoy data......")

# Load buoy observations and the SWAN wave-field data (2018-2020).
buoy_tensor = torch.tensor(np.load("data/buoy_data/buoy_data_train.npy"))
swan_raw = swan_data_prepare.combine_monthly_data("./data/swanSula_data/swanSula", 2018, 2020)

# Checkpoint paths for the best context encoder / decoder.
context_encoder_save = "./net/OW-DGM_context_encoder_model.pth"
decoder_save = "./net/OW-DGM_decoder_model.pth"

# Convert the double-precision SWAN array to a single-precision tensor.
swan_tensor = torch.tensor(swan_raw).float()
print(swan_tensor.dtype)  # expected: torch.float32

# Build the dataset and loader. The dataset batches internally
# (batch_size=64), so the DataLoader passes batches through unchanged
# (batch_size=None) and in order (shuffle=False).
dataset = TimeSeriesDataset(buoy_tensor, swan_tensor, batch_size=64)
dataloader = DataLoader(dataset, batch_size=None, shuffle=False)

# Instantiate the OW-DGM sub-models.
ctx_encoder = model.ContextualEncoder()
wave_encoder = model.WaveFieldEncoderWithBuoy()
wave_decoder = model.WaveFieldDecoder()
wave_discriminator = model.Discriminator()

# Train the model.
trained_models = train_model(
    context_encoder=ctx_encoder,
    encoder=wave_encoder,
    decoder=wave_decoder,
    discriminator=wave_discriminator,
    dataloader=dataloader,
    device=device,
    context_encoder_save=context_encoder_save,
    decoder_save=decoder_save,
    latent_dim=128,
    lambda_kl=0.8,
    lambda_adv=0.01,
    lr=1e-4,
    epochs=120,
    verbose=True,
)

# Report where the trained parameters were saved.
print("模型训练完成，参数已保存至：{}".format(str(context_encoder_save)))
