import numpy as np
import torch
import time
import cantera as ct
import os
import logging
import shutil
import argparse
from pathlib import Path
from datetime import datetime
from typing import Literal, Tuple, Optional

def print_log(message):
    """Print *message* to stdout and append it to the module logger.

    Looks the logger up via ``logging.getLogger(__name__)`` instead of
    relying on the global ``logger`` that is only bound inside the
    ``__main__`` guard — the original raised NameError when this file
    was imported as a module. When run as a script the name resolves to
    the same '__main__' logger, so behavior is unchanged.
    """
    print(message)
    logging.getLogger(__name__).info(message)
    
# Logging setup
def setup_logging(model_dir: str) -> None:
    """Create *model_dir*, snapshot this script into it, and configure file logging.

    Directory creation and the script snapshot only happen when the
    directory does not yet exist, so repeated runs append to the same
    log without overwriting the snapshot.
    """
    if not os.path.exists(model_dir):
        # BUGFIX: os.mkdir fails when intermediate directories are missing
        # (train() passes 'results/train_...' and 'results/' may not exist);
        # os.makedirs creates the whole chain.
        os.makedirs(model_dir)
        current_script = __file__  # path of the currently running script
        print(current_script)
        # Keep a copy of the training script for reproducibility.
        shutil.copy(current_script, model_dir)

    logging.basicConfig(level=logging.INFO,
                        filename=os.path.join(model_dir, 'log.training'),
                        filemode='a', format='%(asctime)s - %(message)s')
    

# Configuration class
class Config:
    """Hyper-parameters, data paths, and mechanism metadata for training."""

    def __init__(self):
        # Chemical mechanism file; the species count is queried from it once.
        self.chem = 'Okafor2018_s59r356.yaml'

        mechanism = ct.Solution(self.chem)
        self.n_species = mechanism.n_species
        del mechanism  # free the Cantera object immediately

        self.time_step = 1e-6
        self.training_data_path = Path('/data/home/2301213115/dataset_CV_750w_1w.npy')
        self.test_data_path = Path('/data/home/2301213115/dataset_CV_750w_1w.npy')
        self.device = "Ascend"
        # 2 non-species inputs plus all species in; n_species - 1 outputs
        # (presumably the last species is recovered by mass closure — confirm).
        self.layers = [2 + self.n_species] + [800] * 4 + [self.n_species - 1]
        self.optim_lr = 1e-3
        self.batch_size = 200
        self.print_interval = 2
        self.save_interval = 20
        self.max_epoch = 1500
        self.decay_epochs = [500, 1000]
        self.validation_batches = 20
        self.random_seed = 42

    def log_config(self):
        """Emit every configuration field via print_log."""
        rows = [
            ('Chemical Mechanism:', self.chem),
            ('Number of Species:', self.n_species),
            ('Time Step:', self.time_step),
            ('Training Data Path:', self.training_data_path),
            ('Test Data Path:', self.test_data_path),
            ('Device:', self.device),
            ('Layers:', self.layers),
            ('Learning Rate:', self.optim_lr),
            ('Batch Size:', self.batch_size),
            ('Print Interval:', self.print_interval),
            ('Save Interval:', self.save_interval),
            ('Max Epochs:', self.max_epoch),
            ('Decay Epochs:', self.decay_epochs),
            ('Random Seed:', self.random_seed),
        ]
        print_log("Configuration Details:")
        for label, value in rows:
            print_log(f"{label:<25} {value}")
        print_log('')

# ============================== Added: MindSpore dependencies ==============================
import mindspore as ms
from mindspore import nn, context, Tensor, Parameter, ops
from mindspore.common.initializer import Normal
from mindspore.dataset import GeneratorDataset
from mindspore.nn import TrainOneStepCell
# ==============================================================================

# ============================== Set the global execution device ==============================
context.set_context(device_target="Ascend")  # replaces the per-object config.device placement of the original version
# ==============================================================================

def BCT(x, lam=0.1):
    """Box-Cox transform: log(x) when lam == 0, else (x**lam - 1) / lam."""
    if lam == 0:
        return np.log(x)
    return (np.power(x, lam) - 1) / lam

def rev_BCT(x, lam=0.1):
    """Inverse Box-Cox transform: exp(x) when lam == 0, else (x*lam + 1)**(1/lam)."""
    if lam == 0:
        return np.exp(x)
    return np.power(x * lam + 1, 1 / lam)

def get_formation_enthalpies(mechanism):
    """Return mass-specific species enthalpies at 298.15 K / 1 atm.

    Loads the mechanism, sets the standard reference state, and divides
    the partial molar enthalpies by the molecular weights (per-mass basis).
    """
    gas = ct.Solution(mechanism)
    gas.TPY = 298.15, ct.one_atm, "O2:1"
    enthalpies = gas.partial_molar_enthalpies / gas.molecular_weights
    del gas
    return enthalpies

# ============================== Model definition ==============================
class NN_MLP(nn.Cell):
    """Fully connected MLP: GELU after every hidden layer, linear output.

    layer_info lists the width of each layer, input first, output last.
    """

    def __init__(self, layer_info):
        super().__init__()
        cells = []
        pairs = list(zip(layer_info[:-1], layer_info[1:]))
        # All but the final (input, output) pair get an activation.
        for in_dim, out_dim in pairs[:-1]:
            cells.append(nn.Dense(in_dim, out_dim, weight_init=Normal(0.02)))
            cells.append(nn.GELU())
        last_in, last_out = pairs[-1]
        cells.append(nn.Dense(last_in, last_out, weight_init=Normal(0.02)))
        self.net = nn.SequentialCell(cells)

    def construct(self, x):
        return self.net(x)
# ==============================================================================
    
def preprocess_data(
    res_np: np.ndarray, 
    n_species: int, 
    normalization: Literal['z-score', 'min-max'] = 'z-score',
    training_stats: Optional[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]] = None
) -> Tuple[Tensor, Tensor, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
    """Build normalized network inputs/targets from raw samples.

    Splits each row into input columns [0, 2+n_species) and output columns
    [4+2n, 4+3n-1) (assumed dataset layout — confirm against the data
    generator), Box-Cox transforms the species columns, and normalizes
    with either freshly computed or caller-supplied statistics. Returns
    float32 tensors plus the statistics tuple for reuse on the test set.
    """
    raw_in = np.abs(res_np[:, :2 + n_species]).copy()
    raw_out = np.abs(res_np[:, 4 + 2 * n_species:4 + 3 * n_species - 1]).copy()
    raw_in[:, 2:] = BCT(raw_in[:, 2:])
    raw_out = BCT(raw_out)
    # The learning target is the Box-Cox-space increment over one step.
    delta = raw_out - raw_in[:, 2:-1]

    # NOTE(review): a zero std / zero range divides by zero below; a guard
    # was considered upstream but deliberately left disabled.
    if normalization == 'z-score':
        if training_stats is None:
            training_stats = (
                raw_in.mean(axis=0),
                raw_in.std(axis=0, ddof=1),
                delta.mean(axis=0),
                delta.std(axis=0, ddof=1),
            )
        in_center, in_scale, tgt_center, tgt_scale = training_stats
        nn_input = (raw_in - in_center) / in_scale
        nn_target = (delta - tgt_center) / tgt_scale
    elif normalization == 'min-max':
        if training_stats is None:
            training_stats = (
                raw_in.min(axis=0),
                raw_in.max(axis=0),
                delta.min(axis=0),
                delta.max(axis=0),
            )
        in_lo, in_hi, tgt_lo, tgt_hi = training_stats
        nn_input = (raw_in - in_lo) / (in_hi - in_lo)
        nn_target = (delta - tgt_lo) / (tgt_hi - tgt_lo)
    else:
        raise ValueError("Normalization method must be either 'z-score' or 'min-max'")

    return Tensor(nn_input).astype(ms.float32), Tensor(nn_target).astype(ms.float32), training_stats

def validate(
    config: Config,
    model: nn.Cell,
    nn_input_t: Tensor,
    nn_target_t: Tensor,
    nn_stats: Tuple[Tensor, Tensor, Tensor, Tensor],
    formation_enthalpies: Tensor,
    criterion: nn.Cell,
    NUM_vali: int,
    test: bool = False
) -> None:
    """Evaluate the model on the last NUM_vali batches and log loss statistics.

    Four loss terms are computed per batch and summarized (mean ± sample
    std, [min, max]):
      loss1 -- prediction error in normalized space
      loss2 -- mass-conservation error on recovered mass fractions
      loss3 -- prediction error after undoing normalization + Box-Cox
      loss6 -- energy-conservation error scaled by 1 / time_step

    NOTE(review): the `(y * 0.1 + 1) ** 10` expressions hard-code the
    inverse of the lam=0.1 Box-Cox transform in preprocess_data — update
    both if lam ever changes.
    """
    nn_in_mean, nn_in_std, nn_target_mean, nn_target_std = nn_stats

    # ===================== per-batch loss accumulators =====================
    loss1s = []
    loss2s = []
    loss3s = []
    loss6s = []

    # ===================== operator instances (created once) =====================
    # NOTE(review): ReduceSum is invoked with `axis=` as a keyword below;
    # verify the primitive accepts keyword form on this MindSpore version.
    concat = ops.Concat(axis=1)
    reduce_sum = ops.ReduceSum()

    for i in range(NUM_vali):
        # ----------------- slice the i-th batch from the tail of the set -----------------
        if i == 0:
            batch_input_t = nn_input_t[-config.batch_size * (i + 1):]
            batch_target_t = nn_target_t[-config.batch_size * (i + 1):]
        else:
            batch_input_t = nn_input_t[-config.batch_size * (i + 1): -config.batch_size * i]
            batch_target_t = nn_target_t[-config.batch_size * (i + 1): -config.batch_size * i]

        # ----------------- forward pass in eval mode, then restore train mode -----------------
        model.set_train(False)
        result = model(batch_input_t)
        model.set_train(True)

        # ----------------- loss terms -----------------
        # Loss1: prediction error in normalized space
        loss1 = criterion(result, batch_target_t[:, :config.n_species - 1])

        # Loss2: mass-conservation error (mass fractions recovered via inverse Box-Cox)
        Y_in = ((batch_input_t[:, 2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1]) * 0.1 + 1) ** 10
        Y_out = (((result * nn_target_std + nn_target_mean) + (batch_input_t[:, 2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1])) * 0.1 + 1) ** 10
        loss2 = criterion(reduce_sum(Y_out, axis=1), reduce_sum(Y_in, axis=1))

        # Loss3: error after the inverse transform (physical space)
        Y_target = (((batch_target_t[:, :config.n_species - 1] * nn_target_std + nn_target_mean) + (batch_input_t[:, 2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1])) * 0.1 + 1) ** 10
        loss3 = criterion(Y_out, Y_target)

        # Loss6: energy-conservation error; the last species is closed as 1 - sum(others)
        Y_in_total = concat((Y_in, (1 - reduce_sum(Y_in, axis=1)).reshape(-1, 1)))
        Y_out_total = concat((Y_out, (1 - reduce_sum(Y_out, axis=1)).reshape(-1, 1)))
        Y_target_total = concat((Y_target, (1 - reduce_sum(Y_target, axis=1)).reshape(-1, 1)))

        loss6 = criterion(
            reduce_sum(formation_enthalpies * Y_out_total, axis=1),
            reduce_sum(formation_enthalpies * Y_target_total, axis=1)
        ) / config.time_step

        # ----------------- record loss values -----------------
        loss1s.append(loss1.asnumpy().item())  # convert to a Python float
        loss2s.append(loss2.asnumpy().item())
        loss3s.append(loss3.asnumpy().item())
        loss6s.append(loss6.asnumpy().item())

    # ===================== summary statistics =====================
    # Make sure at least one batch was processed.
    assert len(loss1s) > 0, "No validation batches processed."

    # Convert to NumPy arrays.
    loss1s_np = np.array(loss1s)
    loss2s_np = np.array(loss2s)
    loss3s_np = np.array(loss3s)
    loss6s_np = np.array(loss6s)

    # Mean, sample standard deviation, max and min per loss term.
    loss1_mean = loss1s_np.mean()
    loss1_std = loss1s_np.std(ddof=1)  # sample standard deviation
    loss1_max = loss1s_np.max()
    loss1_min = loss1s_np.min()

    loss2_mean = loss2s_np.mean()
    loss2_std = loss2s_np.std(ddof=1)
    loss2_max = loss2s_np.max()
    loss2_min = loss2s_np.min()

    loss6_mean = loss6s_np.mean()
    loss6_std = loss6s_np.std(ddof=1)
    loss6_max = loss6s_np.max()
    loss6_min = loss6s_np.min()

    loss3_mean = loss3s_np.mean()
    loss3_std = loss3s_np.std(ddof=1)
    loss3_max = loss3s_np.max()
    loss3_min = loss3s_np.min()

    # ===================== report =====================
    log_prefix = 'Test' if test else 'Validation'
    print_log(f'{log_prefix}:')
    print_log(f'    Loss1: {loss1_mean:.4e} ± {loss1_std:.4e} [{loss1_min:.4e}, {loss1_max:.4e}]')
    print_log(f'    Loss2: {loss2_mean:.4e} ± {loss2_std:.4e} [{loss2_min:.4e}, {loss2_max:.4e}]')
    print_log(f'    Loss6: {loss6_mean:.4e} ± {loss6_std:.4e} [{loss6_min:.4e}, {loss6_max:.4e}]')
    print_log(f'    Loss3: {loss3_mean:.4e} ± {loss3_std:.4e} [{loss3_min:.4e}, {loss3_max:.4e}]')    

# Main training loop
def train(config: Config) -> None:
    """Full training loop.

    Loads the datasets, builds the MLP with a composite physics-informed
    loss (prediction + mass conservation + energy conservation), runs
    config.max_epoch epochs, checkpoints every config.save_interval
    epochs, and decays the learning rate at config.decay_epochs.
    """
    # ===================== run directory & logging =====================
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_dir = f'results/train_{"_".join(map(str, config.layers[1:-1]))}_{config.training_data_path.stem}_{timestamp}'
    setup_logging(model_dir)
    config.log_config()
    print_log(f'Saving to {model_dir}')

    # ===================== data loading & preprocessing =====================
    # The test set is normalized with the TRAINING statistics.
    res_np  = np.load(config.training_data_path)
    test_np = np.load(config.test_data_path)
    nn_input_t, nn_target_t, training_stats = preprocess_data(res_np, config.n_species)
    nn_input_test, nn_target_test, _        = preprocess_data(test_np, config.n_species, training_stats=training_stats)

    # Number of test batches, capped by the test-set size (at least one).
    test_batches = config.validation_batches if config.validation_batches * config.batch_size <= nn_input_test.shape[0] else nn_input_test.shape[0] // config.batch_size
    test_batches = max(1, test_batches)

    # Normalization statistics as float32 tensors for use inside the loss.
    nn_in_mean      = Tensor(training_stats[0], dtype=ms.float32)
    nn_in_std       = Tensor(training_stats[1], dtype=ms.float32)
    nn_target_mean  = Tensor(training_stats[2], dtype=ms.float32)
    nn_target_std   = Tensor(training_stats[3], dtype=ms.float32)

    # NOTE(review): kept for the validate() helper, which is never invoked
    # in this loop — TODO wire periodic validation in.
    nn_training_stats = (nn_in_mean, nn_in_std, nn_target_mean, nn_target_std)

    NUM = nn_input_t.shape[0]
    print_log(f"{'Training dataset size: ':<25}{NUM:>10}")
    print_log(f"{'Test dataset size: ':<25}{nn_input_test.shape[0]:>10}")
    print_log(f'Using {test_batches} batches for testing...\n')

    # Standard-state formation enthalpies for the energy-conservation term.
    formation_enthalpies = Tensor(
        get_formation_enthalpies(config.chem), 
        dtype=ms.float32
    )

    # ===================== model, loss, optimizer =====================
    model = NN_MLP(config.layers)
    criterion = nn.L1Loss()
    optim = nn.Adam(model.trainable_params(), learning_rate=config.optim_lr)

    class CustomWithLossCell(nn.Cell):
        """Wraps the network with the composite physics-informed loss."""

        def __init__(self, net, loss_fn):
            super().__init__()
            self.net = net
            self.loss_fn = loss_fn
            self.formation_enthalpies = formation_enthalpies

        def construct(self, batch_input_t, batch_target_t):
            result = self.net(batch_input_t)

            # Undo normalization and the lam=0.1 Box-Cox transform:
            # (y * 0.1 + 1) ** 10 == rev_BCT(y, lam=0.1).
            Y_in = ((batch_input_t[:,2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1]) * 0.1 + 1) ** 10
            Y_out = (((result * nn_target_std + nn_target_mean) + (batch_input_t[:,2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1])) * 0.1 + 1) ** 10
            Y_target = (((batch_target_t[:,:config.n_species-1] * nn_target_std + nn_target_mean) + (batch_input_t[:,2:-1] * nn_in_std[2:-1] + nn_in_mean[2:-1])) * 0.1 + 1) ** 10

            loss1 = self.loss_fn(result, batch_target_t[:, :config.n_species-1])  # prediction error
            loss2 = self.loss_fn(Y_out.sum(axis=1), Y_in.sum(axis=1))             # mass conservation
            loss3 = self.loss_fn(Y_out, Y_target)                                 # physical-space error

            # Close the last species as 1 - sum(others) before the energy term.
            concat = ops.Concat(axis=1)
            Y_in_total = concat((Y_in, (1 - Y_in.sum(axis=1)).reshape(-1, 1)))
            Y_out_total = concat((Y_out, (1 - Y_out.sum(axis=1)).reshape(-1, 1)))
            Y_target_total = concat((Y_target, (1 - Y_target.sum(axis=1)).reshape(-1, 1)))

            loss6 = self.loss_fn((self.formation_enthalpies * Y_out_total).sum(axis=1),
                                (self.formation_enthalpies * Y_target_total).sum(axis=1)) / config.time_step

            # Same weighting as the reference implementation.
            total_loss = loss1 + loss2 + loss6/1e13 + loss3
            return total_loss

    net_with_loss = CustomWithLossCell(model, criterion)
    train_net = nn.TrainOneStepCell(net_with_loss, optim)

    print_log('Start Training...\n')

    # ===================== training loop =====================
    for epoch in range(config.max_epoch):
        # Shuffle the training portion; the last
        # batch_size * validation_batches samples are reserved for validation.
        per = np.random.permutation(NUM - config.batch_size * config.validation_batches)
        num_batches = (NUM - config.batch_size * config.validation_batches) // config.batch_size

        for i in range(num_batches):
            idx = per[i*config.batch_size:(i+1)*config.batch_size]
            batch_input_t = nn_input_t[idx]
            batch_target_t = nn_target_t[idx]
            loss = train_net(batch_input_t, batch_target_t)

        # ===================== checkpointing =====================
        if (epoch + 1) % config.save_interval == 0:
            # Store the normalization statistics as Parameters alongside the
            # weights so the checkpoint is self-contained at inference time.
            save_dict = {
                'data_in_mean': ms.Parameter(ms.Tensor(training_stats[0]), name='data_in_mean'),
                'data_in_std': ms.Parameter(ms.Tensor(training_stats[1]), name='data_in_std'),
                'data_target_mean': ms.Parameter(ms.Tensor(training_stats[2]), name='data_target_mean'),
                'data_target_std': ms.Parameter(ms.Tensor(training_stats[3]), name='data_target_std'),
            }
            save_dict.update({'net.' + k: v for k, v in model.parameters_dict().items()})
            path = os.path.join(model_dir, f'df_nh3ch4_epoch{epoch+1}.ckpt')
            ms.save_checkpoint(save_dict, path)

        # ===================== learning-rate decay =====================
        if epoch in config.decay_epochs:
            config.optim_lr *= 0.1
            # BUGFIX: the original called nn.Adam(..., lr=...) — a PyTorch
            # keyword that raises TypeError in MindSpore — and never
            # re-wrapped the new optimizer, so train_net kept stepping with
            # the OLD learning rate. Rebuild both so the decay takes effect.
            optim = nn.Adam(model.trainable_params(), learning_rate=config.optim_lr)
            train_net = nn.TrainOneStepCell(net_with_loss, optim)
            print_log(f'learning rate decay to: {config.optim_lr}')


def set_seed(seed):
    """Seed NumPy, Python hashing, and MindSpore for reproducible runs."""
    # Framework-independent seeding first.
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

    # MindSpore global seed, plus deterministic kernel selection
    # (may reduce performance).
    ms.set_seed(seed)
    context.set_context(
        deterministic='ON'
    )


if __name__ == "__main__":
    # Module logger used by print_log().
    logger = logging.getLogger(__name__)

    config = Config()

    # Use the configured seed instead of a duplicated literal so
    # config.random_seed stays the single source of truth.
    set_seed(config.random_seed)

    train(config)
    logging.shutdown()

    # `exit()` is injected by the `site` module and is not guaranteed to
    # exist (e.g. under `python -S` or in frozen builds); SystemExit is.
    raise SystemExit(0)
    
