################
#
# Deep Flow Prediction - N. Thuerey, K. Weissenov, H. Mehrotra, N. Mainali, L. Prantl, X. Hu (TUM)
#
# Main training script
#
################
import os, sys, random
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.cuda.amp import GradScaler, autocast

from DfpNet import TurbNetG, weights_init
import dataset
import utils

######## Settings ########
# Tuned hyperparameters
lrG = 0.0004          # generator learning rate (value suggested by the literature)
weight_decay = 1e-4   # AdamW weight decay
pressure_weight = 0.7  # weight of the pressure-channel loss term (velocity gets 1 - this)
use_amp = True        # enable mixed-precision (AMP) training
max_grad_norm = 1.0   # gradient-clipping threshold
iterations = 80000    # total training iterations (increased to 80k)
batch_size = 4        # mini-batch size (increased)
decayLr = True        # linearly decay the learning rate over training
expo = 7              # channel exponent controlling TurbNetG model size
prop = None           # dataset mix proportions; None = use all of "../data/train"
saveL1 = True         # write per-epoch L1 loss to a text file
doLoad = ""           # path of a pre-trained model to load ("" = train from scratch)
##########################

# Optional output-file prefix taken from the first CLI argument.
prefix = ""
if len(sys.argv) > 1:
    prefix = sys.argv[1]
    print("Output prefix: {}".format(prefix))

# Best-model tracking: lowest average epoch loss seen so far and its weights.
best_loss = float('inf')
best_model_params = None

# Seed all RNGs. A fresh random seed is drawn each run; print it so the run
# can be reproduced later (the original comment claimed a "fixed" seed, but
# without recording the value the run was irreproducible).
seed = random.randint(0, 2**32 - 1)
print("Random seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # no-op / deferred when CUDA is unavailable

# Data loading: turbulence dataset (project-local); prop=None uses all
# samples from the default training directory.
data = dataset.TurbDataset(prop, shuffle=1)
# NOTE(review): drop_last=False means the final batch may be smaller than
# batch_size — downstream code that assumes a fixed batch size must cope.
trainLoader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False)
print("Training batches: {}".format(len(trainLoader)))

# Model initialization: generator network sized by the channel exponent.
netG = TurbNetG(channelExponent=expo, dropout=0.0)
print(netG)
netG.apply(weights_init)  # custom weight init from DfpNet
if len(doLoad)>0:
    netG.load_state_dict(torch.load(doLoad))  # resume from a checkpoint path
netG.cuda()

# Optimizer: AdamW decouples weight decay from the gradient update.
optimizerG = optim.AdamW(netG.parameters(), 
                        lr=lrG,
                        betas=(0.9, 0.999),
                        weight_decay=weight_decay)

# Loss scaler for mixed-precision training (disabled scaler is a no-op).
scaler = GradScaler(enabled=use_amp)

# Pre-allocated GPU buffers for one batch.
# NOTE(review): these buffers are fixed to batch_size, but the DataLoader uses
# drop_last=False — a smaller final batch would make copy_ into them fail.
# Confirm the dataset size divides batch_size, or transfer batches directly
# with .cuda(). Variable is deprecated; plain tensors behave identically here.
targets = Variable(torch.FloatTensor(batch_size, 3, 128, 128)).cuda()
inputs = Variable(torch.FloatTensor(batch_size, 4, 128, 128)).cuda()  # 4 input channels
masks = Variable(torch.FloatTensor(batch_size, 1, 128, 128)).cuda()   # pipe-opening mask — presumably 1 inside the opening; verify against dataset
epochs = int(iterations / len(trainLoader) + 0.5)  # round to nearest whole epoch count

for epoch in range(epochs):
    print("Epoch {}/{}".format(epoch+1, epochs))
    netG.train()
    epoch_loss = 0.0
    
    for i, traindata in enumerate(trainLoader, 0):
        inputs_cpu, targets_cpu, masks_cpu = traindata  # batch includes the pipe-opening mask
        # Transfer the batch to the GPU directly instead of copying into the
        # fixed-size buffers: with drop_last=False the final batch can be
        # smaller than batch_size, and copy_ into a pre-allocated buffer
        # would raise a size-mismatch error on it.
        inputs = inputs_cpu.float().cuda()
        targets = targets_cpu.float().cuda()
        masks = masks_cpu.float().cuda()
        
        # Linear learning-rate decay towards lrG*0.1 over the run.
        if decayLr:
            currLr = utils.computeLR(epoch, epochs, lrG*0.1, lrG)
            if currLr < lrG:
                for g in optimizerG.param_groups:
                    g['lr'] = currLr

        with autocast(enabled=use_amp):
            gen_out = netG(inputs, masks)
            # Weighted L1 loss: pressure channel is restricted to the masked
            # region; velocity channels use the full field.
            loss_p = nn.L1Loss()(gen_out[:, 0] * masks, targets[:, 0] * masks)  # mask-weighted pressure term
            loss_v = nn.L1Loss()(gen_out[:, 1:], targets[:, 1:])
            loss = pressure_weight * loss_p + (1-pressure_weight) * loss_v

        optimizerG.zero_grad()
        scaler.scale(loss).backward()
        # Unscale before clipping so the threshold applies to true gradients.
        scaler.unscale_(optimizerG)
        torch.nn.utils.clip_grad_norm_(netG.parameters(), max_grad_norm)
        scaler.step(optimizerG)
        scaler.update()

        epoch_loss += loss.item()

        if i == len(trainLoader) - 1:
            logline = "Epoch: {}, batch-idx: {}, L1: {}\n".format(epoch, i, epoch_loss)
            print(logline)
    
    avg_loss = epoch_loss / len(trainLoader)
    if avg_loss < best_loss:
        best_loss = avg_loss
        # Clone every tensor: dict.copy() on a state_dict is shallow, so the
        # snapshot would otherwise be mutated in place by later optimizer steps.
        best_model_params = {k: v.detach().clone() for k, v in netG.state_dict().items()}

    # Visualize the last training batch, in eval mode and without building an
    # autograd graph (the original ran this in train mode with grad enabled).
    netG.eval()
    with torch.no_grad():
        outputs = netG(inputs, masks)
    outputs_cpu = outputs.data.cpu().numpy()
    input_ndarray = inputs_cpu.cpu().numpy()[0]
    v_norm = (np.max(np.abs(input_ndarray[0, :, :]))**2 + np.max(np.abs(input_ndarray[1, :, :]))**2)**0.5
    outputs_denormalized = data.denormalize(outputs_cpu[0], v_norm)
    targets_denormalized = data.denormalize(targets_cpu.cpu().numpy()[0], v_norm)
    utils.makeDirs(["results_train"])
    utils.imageOut(f"results_train/epoch{epoch}_{i}", outputs_denormalized, targets_denormalized, saveTargets=True)

    if saveL1:
        if epoch == 0:
            utils.resetLog(prefix + "L1.txt")
        utils.log(prefix + "L1.txt", "{} ".format(avg_loss), False)

# Persist the best snapshot if one was recorded, otherwise the final weights.
params_to_save = best_model_params if best_model_params is not None else netG.state_dict()
torch.save(params_to_save, prefix + "modelG")
print("训练完成 | 最佳损失: {:.4f}".format(best_loss))

# NOTE(review): a full copy of the original upstream training script
# (https://arxiv.org/abs/1810.08217 reference implementation) used to live
# here inside a triple-quoted string. It was dead code — a bare string
# literal with no runtime effect — and has been removed; recover it from
# version control history if needed.