import random
import numpy as np
import cantera as ct

from pathlib import Path

import multiprocessing as mp
from multiprocessing import shared_memory

import os

# os.cpu_count() can return None (e.g. on exotic platforms); fall back to 1
# so the np.min/... comparison below never sees None.  Builtin min() is the
# idiomatic (and cheaper) choice for two scalars.
max_processes = os.cpu_count() or 1
n_process = int(min(max_processes, 100))  # cap the pool size at 100 workers

print('<TIME INTEGRATION USING CANTERA CVODE>\n')
print(f'RUNNING ON {n_process} PROCESSES ...')

# Reaction mechanism; the Solution object is a module-level global so that
# forked worker processes inherit it without re-parsing the YAML file.
chem = 'Okafor2018_s59r356.yaml'
gas = ct.Solution(chem)
n_species = gas.n_species

print(f'USING MECHANISM: {chem} ...')
print(f'TOTAL OF {n_species} SPECIES ...')

# Input dataset of thermochemical states and where to write the augmented set.
init_dataset_path = Path('1Dflame_60NH3_ULFS_noMinTempLimit.npy')
save_path = Path('.') / f'dataset_{init_dataset_path.stem}.npy'
time_step = 1e-6  # integration horizon per state [s]

print(f'LOADING DATASET: {init_dataset_path.resolve()} ...')
print(f'PERFORMING TIME INTEGRATION: {time_step} s ...\n')

def single_step(npstate):
    """Advance one thermochemical state by ``time_step`` seconds.

    ``npstate`` is a 1-D array laid out as [T, P, Y_0 .. Y_{n_species-1}].
    Returns a flat list holding the state BEFORE and AFTER the step, each
    formatted as [T, P, Y..., h_k/W_k...] where h_k/W_k are the specific
    (mass-based) partial enthalpies per species.
    """
    temperature = npstate[0]
    pressure = npstate[1]
    mass_fractions = npstate[2:]

    gas.TPY = temperature, pressure, mass_fractions
    specific_h = gas.partial_molar_enthalpies / gas.molecular_weights
    record = [temperature, pressure, *gas.Y, *specific_h]

    # Constant-volume reactor with the energy equation disabled, integrated
    # by CVODE via a single-reactor network.
    reactor = ct.Reactor(gas, name='R1', energy='off')
    network = ct.ReactorNet([reactor])
    network.rtol, network.atol = 1e-6, 1e-10
    network.advance(time_step)

    # `gas` is the reactor's phase object, so it now holds the post-step state.
    specific_h_new = gas.partial_molar_enthalpies / gas.molecular_weights
    record += [gas.T, gas.P, *gas.Y, *specific_h_new]

    return record


final_test = np.load(init_dataset_path)
# Optional random subsampling (disabled). To enable:
#   sel = np.random.choice(final_test.shape[0], 800000, replace=False)
#   final_test = final_test[sel]

print(f'DATASET SIZE: {final_test.shape}\n')

augmentSet = final_test.copy()

test_n_rows = augmentSet.shape[0]
# Ceiling division so every row is assigned to some worker even when
# test_n_rows is not a multiple of n_process.
rows_per_process = (test_n_rows + n_process - 1) // n_process

# One output row per input row: [T, P, Y..., h/W...] before AND after the
# step -> 2 * (2 + 2*n_species) = 4 + 4*n_species float64 columns.
n_out_cols = 4 + 4 * n_species
float64_size = np.dtype(np.float64).itemsize  # 8 bytes; avoid magic constant

# Shared-memory buffer lets forked workers write results in place without
# pickling large arrays back to the parent.
shm1 = shared_memory.SharedMemory(create=True,
                                  size=float64_size * test_n_rows * n_out_cols
                                  )
cantera_out2 = np.ndarray((test_n_rows, n_out_cols),
                          dtype=np.float64,
                          buffer=shm1.buf
                          )

def worker_evolution(ii):
    """Integrate the ii-th contiguous chunk of rows into cantera_out2.

    The end index is clamped to test_n_rows: with ceiling-divided chunk
    sizes the last chunk would otherwise run past the dataset and raise
    IndexError (which apply_async used to swallow silently, leaving the
    tail rows as zeros).
    """
    start = ii * rows_per_process
    stop = min((ii + 1) * rows_per_process, test_n_rows)
    for i in range(start, stop):
        cantera_out2[i] = np.array(single_step(augmentSet[i, :2 + n_species]))


pool = mp.Pool(n_process)
# Keep the AsyncResult handles: calling .get() after join() re-raises any
# exception that occurred inside a worker instead of failing silently.
async_results = [pool.apply_async(worker_evolution, (ii,))
                 for ii in range(n_process)]
pool.close()
pool.join()
for res in async_results:
    res.get()

try:
    # Shuffle rows in place (still inside the shared-memory buffer), then
    # persist the augmented dataset.
    np.random.shuffle(cantera_out2)
    np.save(save_path, cantera_out2)
    print(f'RESULT DATASET SIZE: {cantera_out2.shape}')
    print(f'SAVED TO: {save_path}')
finally:
    # POSIX shared memory outlives the process until unlink(); guarantee the
    # segment is released even if shuffling/saving raises, otherwise it leaks.
    shm1.close()
    shm1.unlink()
