"""
train_3d
"""
import argparse
import os
import random
import time
import shutil
import yaml
import numpy as np

from mindspore import context, Model, Tensor, DynamicLossScaleManager
from mindspore import nn, load_checkpoint, load_param_into_net, set_seed
from mindspore.common import initializer
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.context import ParallelMode

from src import Callback3D
from src import data_parallel_3d
from src import LossFunc3D, CustomWithLossCell3D, LossToEval3D
from src import StepLR
from src import ResMLP
from src import plt_loss_func

#%% Global RNG seeding
# Fix every random source up front so experiments are reproducible.
_SEED = 2333
set_seed(_SEED)
np.random.seed(_SEED)
random.seed(_SEED)

def _make_paths_absolute(dir_, config):
    """
    将配置文件中所有以`_path`结尾的键对应的值转换为相对于dir_的绝对路径
    
    Args:
        dir_ (str): YAML配置文件所在目录
        config (dict): 配置信息字典
    
    Returns:
        Dict: 包含绝对路径的配置字典
    """
    for key in config.keys():
        if key.endswith("_path"):
            config[key] = os.path.join(dir_, config[key])
            config[key] = os.path.abspath(config[key])
        if isinstance(config[key], dict):
            config[key] = _make_paths_absolute(dir_, config[key])
    return config


def load_yaml_config(file_path):
    """
    Load a YAML configuration file and absolutize its ``*_path`` entries.

    Args:
        file_path (str): Path to the YAML configuration file.

    Returns:
        dict: Configuration dictionary with every ``*_path`` value
        resolved to an absolute path relative to the current working
        directory.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``
    """
    # Explicit encoding: the config file may contain non-ASCII comments,
    # and the platform/locale default codec is not guaranteed to be UTF-8.
    with open(file_path, 'r', encoding='utf-8') as stream:
        config = yaml.safe_load(stream)
    # Rebase all relative *_path values onto the current working directory.
    config = _make_paths_absolute(os.path.abspath('.'), config)
    return config


def train(config):
    """
    Train the 3D turbulence prediction model with data parallelism.

    Builds a ResMLP network, wraps it with the custom 3D loss, runs
    O2 mixed-precision data-parallel training, saves the best checkpoint
    and plots the train/validation loss curves.

    Args:
        config (dict): Training configuration. Expected keys include
            "data_path", "model_path", "batch_size", "epochs",
            "ResMLP" (network hyper-parameters) and "lr_scheduler"
            (learning-rate schedule hyper-parameters).
    """
    # Normalization bounds for the scalar field: the last entry of the
    # min/max statistics files (assumes the scalar quantity is stored
    # last in each file — TODO confirm against the data pipeline).
    sca_min = np.loadtxt(config["data_path"] + '/3d_min.dat')[-1]
    sca_max = np.loadtxt(config["data_path"] + '/3d_max.dat')[-1]

    # Data-parallel setup: each process receives its own data shard.
    rank_id = get_rank()  # global rank of this process
    rank_size = get_group_size()  # total number of processes
    # Training and validation datasets, sharded by process rank.
    train_dataset = data_parallel_3d(config["data_path"] + '/train_data_3d.npy',
                                     rank_id, rank_size, config["batch_size"], is_train=True)
    val_dataset = data_parallel_3d(config["data_path"] + '/val_data_3d.npy',
                                   rank_id, rank_size, config["batch_size"], is_train=False)

    # Configure MindSpore for data-parallel training across all devices.
    # NOTE(review): assumes the RANK_SIZE environment variable is set by
    # the distributed launcher; int(None) would raise here otherwise.
    device_num = int(os.getenv('RANK_SIZE'))
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                      gradients_mean=True, device_num=device_num,
                                      global_rank=0, parameter_broadcast=True)

    # Instantiate the forward network (ResMLP architecture).
    net_config = config["ResMLP"]
    net = ResMLP(input_num=net_config["input_num"], width=net_config["width"],
                 depth=net_config["depth"], output_num=net_config["output_num"])

    # Per-device checkpoint path; training resumes from it when it exists.
    prefix = os.path.join(config["data_path"], "3d_network_example")
    if not os.path.exists(prefix):
        os.makedirs(prefix)
    model_file = "3d_net_best_" + os.environ["DEVICE_ID"] + ".ckpt"
    model_path = os.path.join(prefix, model_file)

    if not os.path.exists(model_path):
        # Fresh run: Xavier-uniform initialization for every Dense layer.
        print("Init Weight by XavierUniform!")
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Dense):
                cell.weight.set_data(initializer.initializer(initializer.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
    else:
        # Resume: restore weights from the saved checkpoint.
        print("Load existed Checkpoint!")
        param_dict = load_checkpoint(model_path)
        load_param_into_net(net, param_dict)

    # Custom 3D turbulence loss, wrapped together with the network.
    loss_fn = LossFunc3D(sca_min, sca_max)
    loss_net = CustomWithLossCell3D(net, loss_fn)

    # Learning-rate schedule: step decay with warmup, expanded per step.
    steps_per_epoch = train_dataset.get_dataset_size()
    lr_config = config["lr_scheduler"]
    lr_scheduler = StepLR(lr=lr_config["learning_rate"], epoch_size=lr_config["epoch_size"],
                          gamma=lr_config["gamma"], steps_per_epoch=steps_per_epoch,
                          max_epoch=config["epochs"], warmup_epochs=lr_config["warmup_epochs"])
    learning_rate = Tensor(lr_scheduler.get_lr())

    # Adam optimizer over all trainable parameters.
    optim = nn.Adam(net.trainable_params(), learning_rate=learning_rate)

    # Evaluation network: shares weights with `net`, uses an
    # evaluation-specific loss, and runs in inference mode.
    eval_loss = LossToEval3D(sca_min, sca_max)  # loss used for evaluation only
    eval_net = CustomWithLossCell3D(net, eval_loss)  # evaluation network
    eval_net.set_train(False)  # switch to inference/evaluation mode

    # Callback: checkpointing plus train/val loss logging during training.
    my_call = Callback3D(ckpt_path=model_path, eval_network=eval_net,
                         eval_1=train_dataset, eval_2=val_dataset)
    # Dynamic loss scaling keeps mixed-precision gradients numerically stable.
    loss_scale_manager = DynamicLossScaleManager()
    # Model object with O2 mixed-precision training enabled.
    model = Model(network=loss_net, optimizer=optim, amp_level="O2",
                  loss_scale_manager=loss_scale_manager)

    # Run training and report wall-clock time.
    time1 = time.time()
    model.train(config["epochs"], train_dataset, callbacks=my_call, dataset_sink_mode=False)
    # Copy the best checkpoint to the configured destination path.
    shutil.copy(model_path, config["model_path"])
    time2 = time.time()
    print('----------model.train time----------')
    print(time2-time1)

    # Plot training and validation loss curves into the checkpoint directory.
    plt_loss_func(config["epochs"], my_call.train_loss_log, "train_loss.png", prefix=prefix)
    plt_loss_func(config["epochs"], my_call.val_loss_log, "val_loss.png",
                  is_train=False, prefix=prefix)


def parse_args():
    """
    Parse command-line arguments for the training script.

    Returns:
        argparse.Namespace: Holds ``config_file_path`` (str), defaulting
        to the bundled 3D ResMLP YAML configuration.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--config_file_path', type=str,
                     default="./configs/TurbAI_3D_ResMLP.yaml")
    return cli.parse_args()


if __name__ == '__main__':
    cli_args = parse_args()
    # Run on Ascend in graph mode; the device is picked via DEVICE_ID.
    context.set_context(mode=context.GRAPH_MODE,
                        device_id=int(os.getenv('DEVICE_ID')),
                        device_target="Ascend")
    init()  # initialize the distributed communication backend
    train(load_yaml_config(cli_args.config_file_path))