import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import cantera as ct
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

import multiprocessing as mp
from pathlib import Path
from multiprocessing import Pool

# import scienceplots

# plt.style.use(['science',])


# Reference mechanism; n_species fixes the width of the species block in the
# data arrays and the network layer sizes used throughout this script.
gas = ct.Solution('Okafor2018_s59r356.yaml')
n_species = gas.n_species

def BCT(x, lam = 0.1):
    """Box-Cox transform of *x* with parameter *lam* (natural log when lam == 0)."""
    if lam == 0:
        return np.log(x)
    return (np.power(x, lam) - 1) / lam

def rev_BCT(x, lam = 0.1):
    """Inverse Box-Cox transform (exp when lam == 0); undoes BCT for the same lam."""
    if lam == 0:
        return np.exp(x)
    return np.power(x * lam + 1, 1 / lam)

class NN_MLP(torch.nn.Module):
    """Plain MLP: Linear + GELU for each hidden layer, bare Linear output head.

    layer_info lists the layer widths, e.g. [n_in, h1, h2, ..., n_out].
    Submodule names ('linear_layer_i', 'gelu_layer_i') are kept stable so
    saved state_dicts remain loadable.
    """

    def __init__(self, layer_info):
        super().__init__()
        widths = list(layer_info)
        last = len(widths) - 1
        self.net = torch.nn.Sequential()
        for idx in range(last - 1):
            self.net.add_module(f'linear_layer_{idx}', torch.nn.Linear(widths[idx], widths[idx + 1]))
            self.net.add_module(f'gelu_layer_{idx}', torch.nn.GELU())
        # Final projection: no activation (regression-style output).
        self.net.add_module(f'linear_layer_{last - 1}', torch.nn.Linear(widths[last - 1], widths[last]))

    def forward(self, x):
        """Apply the stacked layers to *x* and return the raw output."""
        return self.net(x)

def process_data(data_arr):
    """Filter raw samples and cap the set at 10k randomly chosen rows.

    Keeps rows whose species columns (2 : 2+n_species) are all non-negative
    and whose temperature (column 0) exceeds 400 K.  Prints and returns the
    resulting array.
    """
    nonneg_Y = np.all(data_arr[:, 2:2 + n_species] >= 0, axis=1)
    data_arr = data_arr[nonneg_Y]
    data_arr = data_arr[data_arr[:, 0] > 400]

    # Subsample without replacement to bound the evaluation cost.
    if data_arr.shape[0] > 10000:
        keep = np.random.choice(data_arr.shape[0], 10000, replace=False)
        data_arr = data_arr[keep]

    print(data_arr.shape)

    return data_arr

def infer(data_arr):
    """Run the trained MLP on the filtered samples and compare with targets.

    Expects data_arr with [T, P, Y_0..Y_{n-1}, ...] in the leading columns and
    the target mass fractions at columns 4+2*n_species : 4+3*n_species.
    NOTE(review): the layout of the middle columns is not visible here —
    confirm against the dataset generator.

    Returns (data_target, output_Y, init_t, target_delta_Y, predicted_delta_Y).
    """
    # Checkpoint bundles the weights plus the normalisation statistics.
    state_dict = torch.load('df_nh3ch4_multiEQR.pt',map_location='cpu')
    data_in_mean        = state_dict['data_in_mean']
    data_in_std         = state_dict['data_in_std']
    data_target_mean    = state_dict['data_target_mean']
    data_target_std     = state_dict['data_target_std']    
    device = 'cpu'
    # Input: T, P and all species; output: all species except the last one
    # (the last mass fraction is recovered via the sum-to-one rescale below).
    layers = [2+n_species]+[800]*4+[n_species-1]


    model_list = NN_MLP(layers)

    model_list.load_state_dict(state_dict['net'])
    model_list.eval()
    model_list.to(device=device)

    lamb = 0.1  # Box-Cox parameter; must match the value used at training time.
    input_Y = data_arr[:, 2:2+n_species].copy()
    input_bct = data_arr[:, :2+n_species].copy()
    # input_bct[:, 1] = 101325
    # Box-Cox transform the mass fractions only (T and P in cols 0:2 stay raw).
    input_bct[:, 2:] = (input_bct[:, 2:]**(lamb) - 1) / lamb
    input_normalized = (input_bct - data_in_mean) / data_in_std
    # input_normalized = (input_bct - data_in_mean) / (data_in_std - data_in_mean)
    input_normalized = torch.tensor(input_normalized).float()
    input_normalized = input_normalized.to(device)

    output_normalized = []

    output_normalized.append(model_list(input_normalized))
    output_normalized = torch.cat(output_normalized, dim=1)

    # De-normalise; the model predicts a *delta* in BCT space, so add the
    # BCT-transformed inputs back (all species except the last).
    # NOTE(review): this mixes a torch tensor with numpy arrays and relies on
    # implicit conversion — confirm the dtypes stored in the checkpoint.
    output_bct = output_normalized.detach().cpu()* data_target_std + data_target_mean + input_bct[:, 2:-1]
    # output_bct = output_normalized.detach().cpu()* (data_target_std - data_target_mean) + data_target_mean + input_bct[:, 2:-1]
    output_Y = input_Y.copy()
    # Inverse Box-Cox back to mass-fraction space.
    output_Y[:, :-1] = (lamb * output_bct + 1)**(1 / lamb)
    # Rescale predicted species so the full vector (incl. the untouched last
    # species) sums to one.
    output_Y[:, :-1] = output_Y[:, :-1] / np.sum(output_Y[:, :-1], axis=1, keepdims=True) * (1 - output_Y[:, -1:])

    # Ground-truth mass fractions after the time step.
    data_target = data_arr[:, 4+2*n_species:4+3*n_species]
    
    # Initial temperature per sample (used to colour the scatter plots).
    init_t = data_arr[:, 0]
    
    target_delta_Y = data_target - input_Y
    predicted_delta_Y = output_Y - input_Y
    
    return data_target, output_Y, init_t, target_delta_Y, predicted_delta_Y

# Fuel blend (60% NH3 / 40% CH4 by mole) and standard air composition used
# for the equivalence-ratio computation.
fuel_comp = 'NH3:0.6,CH4:0.4'
air = 'O2:1,N2:3.76'
def process_row(row, gas, fuel_comp=fuel_comp, air=air):
    """Set *gas* from one sample row (T, P, Y...) and return its equivalence ratio."""
    gas.TPY = row[0], row[1], row[2:]
    return gas.equivalence_ratio(fuel_comp, air)

# Load the raw 1-D flame dataset, filter/subsample it, and run the network.
training_data = np.load('/mnt/d/u_XJTU.Dec/working_dir/1d_flame_60nh3/dataset_1Dflame_60NH3_ULFS_noMinTempLimit_raw.npy')
data_arr = process_data(training_data)
data_target, output_Y, init_t, target_delta_Y, predicted_delta_Y = infer(data_arr)

# Re-create the Solution object; the forked Pool workers below inherit it.
chem = "Okafor2018_s59r356.yaml"
gas = ct.Solution(chem)
n_species = gas.n_species

# Number of worker processes for the equivalence-ratio computation.
n_process = 40

# T, P and species columns of every retained sample.
test_tmp = np.copy(data_arr[:, :2+n_species])
print(test_tmp.shape)
# Ceiling division: each worker handles a contiguous slice of this many rows.
rows_per_process = (test_tmp.shape[0]+n_process-1)//n_process

def process_batch(process_num):
    """Compute equivalence ratios for this worker's slice of test_tmp.

    process_num selects the contiguous slice
    [process_num*rows_per_process, (process_num+1)*rows_per_process) of
    test_tmp.  Returns an (n_rows, 1) float array (possibly empty) so the
    caller can np.vstack the per-process results.
    """
    start = process_num * rows_per_process
    stop = min((process_num + 1) * rows_per_process, test_tmp.shape[0])
    # Collect into a list and convert once: the original np.vstack inside the
    # loop re-copied the accumulator every iteration (accidental O(n^2)).
    ratios = [process_row(test_tmp[n], gas) for n in range(start, stop)]
    return np.asarray(ratios, dtype=float).reshape(-1, 1)

# Fan the equivalence-ratio computation out across worker processes; each
# worker handles one contiguous slice of test_tmp (see process_batch).
with Pool(processes=n_process) as pool:
    results = pool.map(process_batch, range(n_process))
    
    # Stack the per-process (rows, 1) arrays back into a single column.
    results = np.vstack(results)
    print(results.shape)

# One equivalence ratio per retained sample row.
equiv_ratios = results.flatten()


# Parity plots of predicted vs. target mass-fraction change for a few key
# species, coloured by each sample's initial temperature.  Use
# gas.species_names here instead to plot every species.
for sn in ['N2', 'CH4', 'NH3', 'OH']:

    fig, ax = plt.subplots(1, 1, figsize=(3.5*2, 2.625*2), dpi=300, layout='constrained')

    i = gas.species_index(sn)

    # Compare the change in mass fraction, Box-Cox transformed for spread.
    # NOTE(review): BCT of a negative delta yields NaN for lam=0.1, so those
    # points silently drop out of the scatter — confirm this is intended.
    x = BCT(target_delta_Y[:, i])
    y = BCT(predicted_delta_Y[:, i])

    sc = ax.scatter(x, y, s=6, c=init_t, cmap='jet')

    # RMSE over the absolute mass fractions (not the plotted deltas).
    rmse = (np.sum((data_target[:, i] - output_Y[:, i])**2)/data_target.shape[0])**0.5

    ax.text(0.35, 0.9, f"{sn} RMSE: {rmse:.5e}", transform=ax.transAxes,
        ha='center', va='center', bbox=dict(facecolor='white', alpha=0.5))

    fig.colorbar(sc, ax=ax, label='Temperature [K]', shrink=0.8)

    ax.axis('equal')
    ax.set_box_aspect(1)

    # Force a square, shared axis range and draw the y = x parity line.
    x_limits = ax.get_xlim()
    y_limits = ax.get_ylim()
    limits = [min(x_limits[0], y_limits[0]), max(x_limits[1], y_limits[1])]
    ax.set_xlim(limits)
    ax.set_ylim(limits)

    ax.plot(limits, limits, 'k--', lw=1)

    plt.savefig("residual_plot_"+sn+".png")
    plt.close()