"""
train_2d
"""
import argparse
import os
import random
import shutil
import yaml
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Model, DynamicLossScaleManager, load_checkpoint, \
    load_param_into_net, context, set_seed, set_auto_parallel_context, ParallelMode

from mindspore.common import dtype as mstype
from mindspore.common import initializer
from mindspore.communication import init, get_rank, get_group_size

from src import Callback2D
from src import data_parallel_2d
from src import LossFunc2D, LossToEval2D, CustomWithLossCell2D
from src import StepLR
from src import MLP
from src import plt_loss_func


# Fix the global random seeds for MindSpore, NumPy, and Python's `random`
# so that every run starts from the same random state (reproducibility).
set_seed(2333)
np.random.seed(2333)
random.seed(2333)

def _make_paths_absolute(dir_, config):
    """
    将配置文件中所有以`_path`结尾的键对应的值转换为相对于dir_的绝对路径
    
    Args:
        dir_ (str): YAML配置文件所在目录
        config (dict): 配置信息字典
    
    Returns:
        Dict: 包含绝对路径的配置字典
    """
    for key in config.keys():
        if key.endswith("_path"):
            config[key] = os.path.join(dir_, config[key])
            config[key] = os.path.abspath(config[key])
        if isinstance(config[key], dict):
            config[key] = _make_paths_absolute(dir_, config[key])
    return config


def load_yaml_config(file_path):
    """
    Load a YAML configuration file and resolve its path entries.

    Every value whose key ends in ``_path`` is converted to an absolute
    path relative to the current working directory.

    Args:
        file_path (str): Path to the YAML configuration file.

    Returns:
        dict: Parsed configuration with absolute path values.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``
    """
    # Explicit UTF-8 avoids locale-dependent decode failures (the config
    # may contain non-ASCII text, e.g. Chinese comments).
    with open(file_path, 'r', encoding='utf-8') as stream:
        config = yaml.safe_load(stream)
    # Turn all relative paths into absolutes, rooted at the current
    # working directory.
    return _make_paths_absolute(os.path.abspath('.'), config)


def train(config):
    """
    Train the 2D turbulence prediction model (data-parallel).

    Builds the train/validation datasets and the MLP network, restores an
    existing checkpoint when present (otherwise applies Xavier-uniform
    initialization to Dense layers), then runs O2 mixed-precision training
    with a warmup + step-decay learning-rate schedule and dynamic loss
    scaling. The best checkpoint is copied to ``config["model_path"]`` and
    loss curves are saved as PNG files.

    Args:
        config (dict): Training configuration. Keys used here include
            ``data_path``, ``model_path``, ``epochs``, ``MLP`` and
            ``lr_scheduler``.
    """
    # Data-parallel setup: this process's rank and the total process count.
    rank_id = get_rank()
    rank_size = get_group_size()

    # Build the training and validation datasets (sharded across ranks).
    train_path = os.path.join(config["data_path"], 'train.txt')
    test_path = os.path.join(config["data_path"], 'val.txt')
    train_dataset = data_parallel_2d(config, train_path, rank_id, rank_size, is_train=True)
    test_dataset_batch = data_parallel_2d(config, test_path, rank_id, rank_size, is_train=False)

    # Instantiate the multilayer-perceptron model.
    net = MLP(config["MLP"])

    # Checkpoint location: one file per device (DEVICE_ID in the name).
    prefix = os.path.join(config["data_path"], "2d_network_example")
    # exist_ok avoids the check-then-create race when several ranks on the
    # same node reach this point simultaneously.
    os.makedirs(prefix, exist_ok=True)
    model_file = "2d_net_best_" + os.environ["DEVICE_ID"] + ".ckpt"
    model_path = os.path.join(prefix, model_file)

    # Weight initialization: Xavier-uniform on Dense layers for a fresh
    # run, otherwise resume from the existing checkpoint.
    if not os.path.exists(model_path):
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Dense):
                cell.weight.set_data(initializer.initializer(initializer.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
    else:
        print("Load existed Checkpoint!")
        param_dict = load_checkpoint(model_path)
        load_param_into_net(net, param_dict)

    # Wrap the network with its training loss function.
    net_with_loss = CustomWithLossCell2D(net, LossFunc2D())

    # Learning-rate schedule: warmup followed by step decay.
    steps_per_epoch = train_dataset.get_dataset_size()
    lr_config = config["lr_scheduler"]
    lr_scheduler = StepLR(lr=lr_config["learning_rate"], epoch_size=lr_config["epoch_size"],
                          gamma=lr_config["gamma"], steps_per_epoch=steps_per_epoch,
                          max_epoch=config["epochs"], warmup_epochs=lr_config["warmup_epochs"])
    lr_tensor = Tensor(lr_scheduler.get_lr(), mstype.float32)

    # Adam optimizer driven by the per-step learning-rate tensor.
    optim = nn.Adam(net.trainable_params(), learning_rate=lr_tensor)

    # Evaluation network sharing the trained weights, in inference mode.
    eval_loss = LossToEval2D()
    eval_net = CustomWithLossCell2D(net, eval_loss)
    eval_net.set_train(False)

    # Callback handles checkpoint saving and loss logging on both the
    # training and validation datasets.
    my_call = Callback2D(model_path, network=net, eval_network=eval_net,
                         eval_1=train_dataset, eval_2=test_dataset_batch)

    # Dynamic loss scaling keeps mixed-precision gradients in range.
    loss_scale = DynamicLossScaleManager()

    # Build the MindSpore model with O2 mixed precision.
    model = Model(network=net_with_loss, optimizer=optim,
                  amp_level="O2", loss_scale_manager=loss_scale)

    # Run the training loop.
    model.train(config["epochs"], train_dataset, callbacks=my_call, dataset_sink_mode=False)

    # Copy the best checkpoint to the configured destination.
    shutil.copy(model_path, config["model_path"])

    # Plot training and validation loss curves next to the checkpoints.
    plt_loss_func(config["epochs"], my_call.train_loss_log, "train_loss.png", prefix=prefix)
    plt_loss_func(config["epochs"], my_call.val_loss_log, "test_loss.png", is_train=False,
                  prefix=prefix)


def parse_args(argv=None):
    """
    Parse command-line arguments.

    Args:
        argv (list[str], optional): Argument list to parse. Defaults to
            ``None``, which means ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: Parsed arguments with ``config_file_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file_path', type=str, default="./configs/TurbAI_2D_MLP.yaml")
    input_args = parser.parse_args(argv)
    return input_args


if __name__ == '__main__':
    # Parse CLI arguments and configure the MindSpore runtime: graph mode
    # on the Ascend device selected by the DEVICE_ID environment variable.
    args = parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_id=int(os.environ["DEVICE_ID"]),
                        device_target="Ascend")
    
    # Initialize the distributed (data-parallel) environment. RANK_SIZE
    # must be set by the launcher; int(None) raises TypeError if missing.
    init()
    device_num = int(os.getenv('RANK_SIZE'))
    set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                              device_num=device_num, global_rank=0)
    
    # Load the YAML configuration and start training.
    train_config = load_yaml_config(args.config_file_path)
    train(train_config)