import numpy as np
import torch
import torch.nn as nn
import time
import cantera as ct
import os
import logging
import shutil
import argparse

from pathlib import Path
from datetime import datetime

from typing import Literal, Tuple, Optional

def print_log(message):
    """Echo *message* to stdout and record it via this module's logger.

    Looks the logger up with logging.getLogger(__name__) instead of using the
    module-level ``logger`` global, which is only bound inside the __main__
    guard and would raise NameError if this module were imported rather than
    executed as a script. When run as a script both resolve to the same
    logger object, so behavior is unchanged.
    """
    print(message)
    logging.getLogger(__name__).info(message)
    
# Logging setup
def setup_logging(model_dir: str) -> None:
    """Create the run directory, snapshot this script into it, configure logging.

    On first creation of ``model_dir`` a copy of the running script is stored
    there for provenance. Logging then appends to ``<model_dir>/log.training``.

    Note: ``logging.basicConfig`` is a no-op if the root logger is already
    configured, so only the first call in a process takes effect.
    """
    if not os.path.exists(model_dir):
        # makedirs, not mkdir: train() passes a nested path like
        # 'results/train_.../', whose parent may not exist yet.
        os.makedirs(model_dir)
        current_script = __file__  # Get the path of the current script
        print(current_script)
        shutil.copy(current_script, model_dir)  # Copy the script to the model directory

    logging.basicConfig(level=logging.INFO,
                        filename=os.path.join(model_dir, 'log.training'),
                        filemode='a', format='%(asctime)s - %(message)s')
    

# Configuration class
class Config:
    """Hyper-parameters, paths, and reproducibility settings for one training run."""

    def __init__(self):
        # Cantera mechanism file; consulted here only to learn the species count.
        self.chem = 'Okafor2018_s59r356.yaml'

        gas_ = ct.Solution(self.chem)
        self.n_species = gas_.n_species
        del gas_

        self.time_step = 1e-6
        self.training_data_path = Path('/data/xiaok/0_NH3CH4_DNN/2_DNN_Models/training_br06/new_attempts.20250103/dataset_1Dflame_60NH3_ULFS_stage4.npy')
        self.test_data_path = Path('/data/xiaok/0_NH3CH4_DNN/2_DNN_Models/training_br06/new_attempts.20250101/dataset_1Dflame_60NH3_1e-6_100steps.npy')
        self.device = "cuda:4"
        # Widths: two extra inputs beyond the species (thermo state — presumably
        # T and p, TODO confirm), four hidden layers of 800, n_species-1 outputs.
        self.layers = [2 + self.n_species] + [800] * 4 + [self.n_species - 1]
        self.optim_lr = 1e-3
        self.batch_size = 20000
        self.print_interval = 2
        self.save_interval = 20
        self.max_epoch = 1500
        self.decay_epochs = [500, 1000]
        self.validation_batches = 20

        # Placeholder only; preprocess_data() overwrites it with the fitted
        # per-species arcsinh scales.
        self.training_target_min = np.empty(self.n_species - 1)

        self.random_seed = 42

        def set_seed(seed):
            #random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)                     # CPU RNG
            torch.cuda.manual_seed(seed)                # current GPU
            torch.cuda.manual_seed_all(seed)            # every GPU
            os.environ['PYTHONHASHSEED'] = str(seed)    # disable hash randomization
            torch.backends.cudnn.deterministic = True   # always pick deterministic conv algorithms
            torch.backends.cudnn.benchmark = False      # disable autotuning so runs are reproducible

        set_seed(self.random_seed)

    def log_config(self):
        """Echo every setting to stdout and the training log."""
        rows = [
            ('Chemical Mechanism:', self.chem),
            ('Number of Species:', self.n_species),
            ('Time Step:', self.time_step),
            ('Training Data Path:', self.training_data_path),
            ('Test Data Path:', self.test_data_path),
            ('Device:', self.device),
            ('Layers:', self.layers),
            ('Learning Rate:', self.optim_lr),
            ('Batch Size:', self.batch_size),
            ('Print Interval:', self.print_interval),
            ('Save Interval:', self.save_interval),
            ('Max Epochs:', self.max_epoch),
            ('Decay Epochs:', self.decay_epochs),
            ('Random Seed:', self.random_seed),
        ]
        print_log("Configuration Details:")
        for label, value in rows:
            print_log(f"{label:<25} {value}")
        print_log('')

def BCT(x, lam = 0.1):
    """Box-Cox transform: log(x) for lam == 0, otherwise (x**lam - 1) / lam."""
    if lam == 0:
        return np.log(x)
    return (np.power(x, lam) - 1) / lam

def rev_BCT(x, lam = 0.1):
    """Inverse Box-Cox: exp(x) for lam == 0, otherwise (lam*x + 1)**(1/lam)."""
    if lam == 0:
        return np.exp(x)
    return np.power(x * lam + 1, 1 / lam)

def get_formation_enthalpies(mechanism):
    """Return per-species mass-specific enthalpies at 298.15 K and 1 atm.

    Evaluated at the standard reference state, so these serve as formation
    enthalpies in the enthalpy-balance loss. The composition "O2:1" only fixes
    a valid state; partial molar enthalpies of an ideal-gas mixture at fixed
    T, p do not depend on it — presumably the author's intent, worth confirming.
    """
    solution = ct.Solution(mechanism)
    solution.TPY = 298.15, ct.one_atm, "O2:1"
    enthalpies = solution.partial_molar_enthalpies / solution.molecular_weights

    del solution

    return enthalpies

class NN_MLP(nn.Module):
    """Plain fully-connected net: (Linear -> GELU) per hidden layer, linear head.

    ``layer_info`` lists every layer width, input first and output last. The
    module names ('linear_layer_i', 'gelu_layer_i') are kept exactly as before
    so previously saved state_dicts remain loadable.
    """

    def __init__(self, layer_info):
        super(NN_MLP, self).__init__()
        self.net = nn.Sequential()
        hidden = len(layer_info) - 2
        for idx in range(hidden):
            self.net.add_module('linear_layer_%d' % idx, nn.Linear(layer_info[idx], layer_info[idx + 1]))
            #self.net.add_module('batch_norm_%d' %(idx), torch.nn.BatchNorm1d(layer_info[idx + 1]))
            self.net.add_module('gelu_layer_%d' % idx, nn.GELU())
        # Output layer carries no activation.
        self.net.add_module('linear_layer_%d' % hidden, nn.Linear(layer_info[-2], layer_info[-1]))

    def forward(self, x):
        """Run a batch through the stack."""
        return self.net(x)
    
def preprocess_data(
    config: Config,
    res_np: np.ndarray,
    n_species: int,
    normalization: Literal['z-score', 'min-max'] = 'z-score',
    training_stats: Optional[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]] = None,
    training_target_min: Optional[np.ndarray] = None,
) -> Tuple[torch.Tensor, torch.Tensor, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
    """Turn raw dataset rows into normalized (input, target) float32 tensors.

    Columns [0, 2+n_species) are the network inputs; columns
    [4+2*n_species, 4+3*n_species-1) hold the next-step species values. Species
    columns are Box-Cox transformed (lambda=0.1); the learning target is the
    per-species BCT delta, squashed with arcsinh after dividing by a
    per-column scale.

    Args:
        config: run configuration. config.training_target_min is overwritten
            with the fitted per-column scales whenever they are computed here,
            so they can be exported with the model checkpoint.
        res_np: raw dataset with at least 4 + 3*n_species - 1 columns.
        n_species: number of species in the mechanism.
        normalization: 'z-score' or 'min-max'.
        training_stats: normalization stats fitted on the training set; pass
            None to fit them on res_np.
        training_target_min: per-column arcsinh scales to reuse; pass None to
            fit them on res_np.

    Returns:
        (nn_input, nn_target, training_stats).

    Raises:
        ValueError: if ``normalization`` is not a recognized method.
    """
    data_in = np.abs(res_np[:, :2 + n_species]).copy()
    data_out = np.abs(res_np[:, 4 + 2 * n_species:4 + 3 * n_species - 1]).copy()
    # BCT requires non-negative inputs, ensured by np.abs above.
    data_in[:, 2:] = BCT(data_in[:, 2:])
    data_out = BCT(data_out)
    data_target = data_out - data_in[:, 2:-1]

    if training_target_min is None:
        # Fit the per-column scale: the smallest non-zero magnitude keeps
        # arcsinh(target / scale) well spread around zero.
        # (The previous version indexed into None here, raising TypeError.)
        training_target_min = np.empty(data_target.shape[1])
        for i in range(data_target.shape[1]):
            col = data_target[:, i]
            training_target_min[i] = np.min(np.abs(col[col != 0]))
        # Persist the fitted scales for the model checkpoint.
        config.training_target_min = training_target_min

    for i in range(data_target.shape[1]):
        data_target[:, i] = np.arcsinh(data_target[:, i] / training_target_min[i])

    # Normalization
    if normalization == 'z-score':
        if training_stats is None:
            data_in_mean = data_in.mean(axis=0)
            data_in_std = data_in.std(axis=0, ddof=1)
            data_target_mean = data_target.mean(axis=0)
            data_target_std = data_target.std(axis=0, ddof=1)

            # make sure std not equal to 0 when appearing in denominator
            # data_in_std[data_in_std == 0] = 1
            # data_target_std[data_target_std == 0] = 1

            training_stats = (data_in_mean, data_in_std, data_target_mean, data_target_std)
        else:
            data_in_mean, data_in_std, data_target_mean, data_target_std = training_stats

        nn_input = (data_in - data_in_mean) / data_in_std
        nn_target = (data_target - data_target_mean) / data_target_std

    elif normalization == 'min-max':
        if training_stats is None:
            data_in_min = data_in.min(axis=0)
            data_in_max = data_in.max(axis=0)
            data_target_min = data_target.min(axis=0)
            data_target_max = data_target.max(axis=0)
            training_stats = (data_in_min, data_in_max, data_target_min, data_target_max)
        else:
            data_in_min, data_in_max, data_target_min, data_target_max = training_stats

        nn_input = (data_in - data_in_min) / (data_in_max - data_in_min)
        nn_target = (data_target - data_target_min) / (data_target_max - data_target_min)

    else:
        raise ValueError("Normalization method must be either 'z-score' or 'min-max'")

    return torch.tensor(nn_input).float(), torch.tensor(nn_target).float(), training_stats

def _loss_summary(losses):
    """Return (mean, std, max, min) over a list of per-batch scalar losses."""
    arr = np.array(losses)
    return arr.mean(), arr.std(), arr.max(), arr.min()

def validate(
    config: Config,
    model: nn.Module,
    nn_input_t: torch.Tensor,
    nn_target_t: torch.Tensor,
    nn_stats: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],  # (nn_in_mean, nn_in_std, nn_target_mean, nn_target_std)
    formation_enthalpies: torch.Tensor,
    criterion: nn.Module,
    NUM_vali: int,
    test: bool = False
) -> None:
    """Evaluate the model on the last NUM_vali batches and log loss statistics.

    Losses (printed, nothing returned):
      loss1 - criterion on the normalized network output vs the target.
      loss2 - mass-conservation proxy: sum of output vs input mass fractions.
      loss3 - criterion after mapping back to mass-fraction space.
      loss6 - enthalpy mismatch between output and target, divided by the
              time step.

    NOTE(review): despite the mean/std names, with 'min-max' normalization (as
    used by train()) nn_stats holds (min, max, min, max); the un-scaling below,
    x*(std-mean)+mean, then equals x*(max-min)+min.
    """
    nn_in_mean, nn_in_std, nn_target_mean, nn_target_std = nn_stats

    loss1s = []; loss2s = []; loss3s = []; loss6s = [];
    for i in range(NUM_vali):
        # Batches are taken from the END of the dataset; train() keeps these
        # rows out of its shuffled training range.
        if i==0:
            batch_input_t = nn_input_t[-config.batch_size*(i+1):].to(config.device)
            batch_target_t = nn_target_t[-config.batch_size*(i+1):].to(config.device)
        else:
            batch_input_t = nn_input_t[-config.batch_size*(i+1):-config.batch_size*(i)].to(config.device)
            batch_target_t = nn_target_t[-config.batch_size*(i+1):-config.batch_size*(i)].to(config.device)

        ###### Combine NN
        model.eval()
        with torch.no_grad():  # evaluation only: no backward pass, so skip autograd bookkeeping
            result = model(batch_input_t)
        model.train()
        ####################
        loss1 = criterion(result, batch_target_t[:,:config.n_species-1])
        # Undo the input/target scaling, then invert the lambda=0.1 Box-Cox
        # transform via (0.1*y + 1)**10 to recover mass fractions.
        Y_in = ((batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1])*0.1 + 1)**10
        Y_out = (((result*(nn_target_std-nn_target_mean) + nn_target_mean) + (batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1]))*0.1 + 1)**10
        Y_target = (((batch_target_t[:,:config.n_species-1]*(nn_target_std-nn_target_mean) + nn_target_mean) + (batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1]))*0.1 + 1)**10
        loss2 = criterion(Y_out.sum(axis=1), Y_in.sum(axis=1))
        loss3 = criterion(Y_out, Y_target)
        # Close the mass balance by appending the implied last species (1 - sum).
        Y_in_total = torch.cat((Y_in, (1-Y_in.sum(axis=1)).reshape(Y_in.shape[0],1)), axis = 1)
        Y_out_total = torch.cat((Y_out, (1-Y_out.sum(axis=1)).reshape(Y_out.shape[0],1)), axis = 1)
        Y_target_total = torch.cat((Y_target, (1-Y_target.sum(axis=1)).reshape(Y_target.shape[0],1)), axis = 1)
        loss6 = criterion((formation_enthalpies*Y_out_total).sum(axis=1), (formation_enthalpies*Y_target_total).sum(axis=1))/config.time_step

        loss1s.append(loss1.item())
        loss2s.append(loss2.item())
        loss6s.append(loss6.item())
        loss3s.append(loss3.item())

    loss1, loss1_std, loss1_max, loss1_min = _loss_summary(loss1s)
    loss2, loss2_std, loss2_max, loss2_min = _loss_summary(loss2s)
    loss6, loss6_std, loss6_max, loss6_min = _loss_summary(loss6s)
    loss3, loss3_std, loss3_max, loss3_min = _loss_summary(loss3s)

    if not test:
        print_log(f'Validation:')
    else:
        print_log(f'Test:')

    print_log(f'    Loss1: {loss1:.4e} ± {loss1_std:.4e} [{loss1_min:.4e}, {loss1_max:.4e}]')
    print_log(f'    Loss2: {loss2:.4e} ± {loss2_std:.4e} [{loss2_min:.4e}, {loss2_max:.4e}]')
    print_log(f'    Loss6: {loss6:.4e} ± {loss6_std:.4e} [{loss6_min:.4e}, {loss6_max:.4e}]')
    print_log(f'    Loss3: {loss3:.4e} ± {loss3_std:.4e} [{loss3_min:.4e}, {loss3_max:.4e}]')
    
    




# Main training loop
def train(config: Config) -> None:
    """Full training driver: load data, fit the MLP, validate, and checkpoint.

    Side effects: creates a timestamped directory under results/, configures
    file logging into it, mutates config (training_target_min, optim_lr), and
    periodically writes the model plus normalization stats there.
    """
    # One output directory per run, named after hidden widths, dataset, and time.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_dir = f'results/train_{"_".join(map(str, config.layers[1:-1]))}_{config.training_data_path.stem}_{timestamp}'
    setup_logging(model_dir)
    config.log_config()
    print_log(f'Saving to {model_dir}')
    
    res_np  = np.load(config.training_data_path)
    test_np = np.load(config.test_data_path)
    # Fit normalization stats on the training set ...
    nn_input_t, nn_target_t, training_stats = preprocess_data(config, res_np, config.n_species,
                                                                normalization='min-max')
    # ... and reuse them for the test set so both live in the same scale.
    nn_input_test, nn_target_test, _        = preprocess_data(config, test_np, config.n_species, training_stats=training_stats,
                                                                normalization='min-max')
    
    # Never request more test batches than the test set can actually supply.
    test_batches = config.validation_batches if config.validation_batches * config.batch_size <= nn_input_test.shape[0] else nn_input_test.shape[0] // config.batch_size
    
    # NOTE(review): names say mean/std, but with 'min-max' normalization these
    # hold (min, max, min, max); the loss code relies on x*(std-mean)+mean
    # being x*(max-min)+min to undo the scaling.
    nn_in_mean      = torch.tensor(training_stats[0]).float().to(config.device)
    nn_in_std       = torch.tensor(training_stats[1]).float().to(config.device)
    nn_target_mean  = torch.tensor(training_stats[2]).float().to(config.device)
    nn_target_std   = torch.tensor(training_stats[3]).float().to(config.device)
    
    nn_training_stats = (nn_in_mean, nn_in_std, nn_target_mean, nn_target_std)
    
    NUM = nn_input_t.shape[0]
    print_log(f"{'Training dataset size: ':<25}{NUM:>10}")
    print_log(f"{'Test dataset size: ':<25}{nn_input_test.shape[0]:>10}")
    print_log(f'Using {test_batches} batches for testing...\n')
    
    
    # Per-species enthalpies at the reference state, used in loss6 below.
    formation_enthalpies = get_formation_enthalpies(config.chem)
    formation_enthalpies = torch.tensor(formation_enthalpies).float().to(config.device)
    
    model       = NN_MLP(config.layers).to(config.device)
    criterion   = nn.L1Loss()
    optim       = torch.optim.Adam(model.parameters(), lr=config.optim_lr)
    
    print_log('Start Training...\n')
    
    last_time = time.time()
    for epoch in range(config.max_epoch):
        loss1s = []; loss2s = []; loss6s = []; loss3s = [];
        # Shuffle only the head of the dataset: the last
        # batch_size * validation_batches rows are held out for validate().
        per = np.random.permutation(NUM-config.batch_size*config.validation_batches)
        for i in range((NUM-config.batch_size*config.validation_batches)//config.batch_size):
        #for i in range(10):
            a = per[i*config.batch_size:(i+1)*config.batch_size]
            batch_input_t = nn_input_t[a].to(config.device)
            batch_target_t = nn_target_t[a].to(config.device)
            
            ###### Combine NN
            result = model(batch_input_t)
            ####################
            # loss1: criterion in normalized target space.
            loss1 = criterion(result, batch_target_t[:,:config.n_species-1])
            # Undo the scaling, then invert the lambda=0.1 Box-Cox transform
            # via (0.1*y + 1)**10 to recover mass fractions.
            Y_in = ((batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1])*0.1 + 1)**10
            Y_out = (((result*(nn_target_std-nn_target_mean) + nn_target_mean) + (batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1]))*0.1 + 1)**10
            Y_target = (((batch_target_t[:,:config.n_species-1]*(nn_target_std-nn_target_mean) + nn_target_mean) + (batch_input_t[:,2:-1]*(nn_in_std[2:-1]-nn_in_mean[2:-1]) + nn_in_mean[2:-1]))*0.1 + 1)**10
            # loss2: mass-conservation proxy (sums of mass fractions match).
            loss2 = criterion(Y_out.sum(axis=1), Y_in.sum(axis=1))
            # loss3: criterion in mass-fraction space.
            loss3 = criterion(Y_out, Y_target)
            # Close the balance by appending the implied last species (1 - sum).
            Y_in_total = torch.cat((Y_in, (1-Y_in.sum(axis=1)).reshape(Y_in.shape[0],1)), axis = 1)
            Y_out_total = torch.cat((Y_out, (1-Y_out.sum(axis=1)).reshape(Y_out.shape[0],1)), axis = 1)
            Y_target_total = torch.cat((Y_target, (1-Y_target.sum(axis=1)).reshape(Y_target.shape[0],1)), axis = 1)
            # loss6: enthalpy mismatch per time step.
            loss6 = criterion((formation_enthalpies*Y_out_total).sum(axis=1), (formation_enthalpies*Y_target_total).sum(axis=1))/config.time_step
            # 1e13 down-weights loss6 to a magnitude comparable to the others.
            loss = loss1 + loss2 + loss6/1e13 + loss3
            optim.zero_grad()
            loss.backward()
            optim.step()
            
            loss1s.append(loss1.item())
            loss2s.append(loss2.item())
            loss6s.append(loss6.item())
            loss3s.append(loss3.item())
        
        # Epoch-level statistics over the per-batch losses.
        loss1 = np.array(loss1s).mean()
        loss2 = np.array(loss2s).mean()
        loss6 = np.array(loss6s).mean()
        
        loss1_std = np.array(loss1s).std()
        loss2_std = np.array(loss2s).std()
        loss6_std = np.array(loss6s).std()
        
        loss1_max = np.array(loss1s).max()
        loss2_max = np.array(loss2s).max()
        loss6_max = np.array(loss6s).max()
        loss1_min = np.array(loss1s).min()
        loss2_min = np.array(loss2s).min()
        loss6_min = np.array(loss6s).min()
        
        loss3 = np.array(loss3s).mean()
        loss3_std = np.array(loss3s).std()
        loss3_max = np.array(loss3s).max()
        loss3_min = np.array(loss3s).min()
        
        if (epoch + 1) % config.print_interval == 0:
            print_log(f'Epoch: {epoch + 1}, Time: {(time.time() - last_time)/60:.4f}m')
            print_log(f'    Loss1: {loss1:.4e} ± {loss1_std:.4e} [{loss1_min:.4e}, {loss1_max:.4e}]')
            print_log(f'    Loss2: {loss2:.4e} ± {loss2_std:.4e} [{loss2_min:.4e}, {loss2_max:.4e}]')
            print_log(f'    Loss6: {loss6:.4e} ± {loss6_std:.4e} [{loss6_min:.4e}, {loss6_max:.4e}]')
            print_log(f'    Loss3: {loss3:.4e} ± {loss3_std:.4e} [{loss3_min:.4e}, {loss3_max:.4e}]')
            
            last_time = time.time()
            
            ####### validate: in-sample hold-out first, then the test set
            validate(config, model, 
                        nn_input_t, nn_target_t, 
                        nn_training_stats,
                        formation_enthalpies, criterion, config.validation_batches)
            validate(config, model,
                        nn_input_test, nn_target_test,
                        nn_training_stats,
                        formation_enthalpies, criterion, test_batches, test=True)
            
        if (epoch + 1) % config.save_interval == 0:
            
            # Checkpoint: weights PLUS every scaling constant needed to
            # reproduce the input/output transforms at inference time.
            path = os.path.join(model_dir, 'df_nh3ch4.pt')
            # aggregate DNN models
            aggregate_net_dict = {}
            aggregate_net_dict['data_in_mean'] = training_stats[0]
            aggregate_net_dict['data_in_std'] = training_stats[1]
            aggregate_net_dict['data_target_mean'] = training_stats[2]
            aggregate_net_dict['data_target_std'] = training_stats[3]
            aggregate_net_dict['training_target_min'] = config.training_target_min
            aggregate_net_dict['net'] = model.state_dict()
            torch.save(aggregate_net_dict, path)
        # Step-wise LR decay: rebuild Adam with a 10x smaller learning rate.
        # NOTE(review): rebuilding the optimizer also resets Adam's moments.
        if epoch in config.decay_epochs:
            config.optim_lr *= 0.1
            optim = torch.optim.Adam(model.parameters(), lr=config.optim_lr)
            print_log('learning rate decay to: {}'.format(config.optim_lr))

if __name__ == "__main__":
    
    # Module-level logger used by print_log(); bound before train() runs.
    logger = logging.getLogger(__name__)
    

    
    config = Config()
    train(config)
    # Flush and close all logging handlers before the process exits.
    logging.shutdown()
    
    exit(0)
    
