# Example 4: Training O2H4

import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))

import jax
import time
import optax
import pickle
import numpy as np
import jax.numpy as jnp
import haiku as hk
import jaxopt as opt

from absl import app
from absl import flags
from jax import Array, jit
from jaxopt import OptaxSolver
from typing import Optional, Union, List
from ml_collections import config_flags

from cybertron.embedding import ConformationEmbedding
from cybertron.model import MolCT
from cybertron.readout import AtomwiseReadout, PairwiseReadout
from cybertron import Cybertron
from cybertron.utils.train import print_net_params, transformer_lr, batch_split
from cybertron.utils.train import LinearScaleShift

# NET_CONFIG = config_flags.DEFINE_config_file('net_config', 'example_4_config.py')
# flags.DEFINE_integer('batch_size', 32, 'Batch size.')
# flags.DEFINE_integer('epoch_number', 8, 'Number of epochs.')
# flags.DEFINE_integer('seed', 114514, 'Random seed.')
# flags.DEFINE_integer('call_back_step', 16, 'Number of steps between callbacks.')
# flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')

# FLAGS = flags.FLAGS

def my_net(atom_type, 
           atom_mask, 
           bond_type, 
           bond_mask, 
           coordinate, 
           dist_mask,):
    """Assemble the Cybertron network and apply it to one conformation.

    Pipeline: a conformation embedding (distance RBF with smooth cutoff,
    no bond embedding), a MolCT interaction backbone, and a single
    pairwise readout producing one scalar per structure. This function is
    meant to be wrapped by jax.vmap + hk.transform for batched use.
    """
    # NOTE(review): 'rbf_runc' looks like a typo for 'rbf_func' -- confirm
    # against ConformationEmbedding's accepted keyword arguments before renaming.
    embedding = ConformationEmbedding(dim_node=64,
                                      dim_edge=64,
                                      num_atom_types=64,
                                      is_emb_dis=True,
                                      is_emb_bond=False,
                                      dis_self=0.05,
                                      cutoff=2.0,
                                      cutoff_func='smooth',
                                      rbf_runc='log_gaussian',
                                      name='embedding',) # type: ignore
    backbone = MolCT(dim_feature=64,
                     dim_node_emb=64,
                     dim_edge_emb=64,
                     is_edge_update=True,
                     is_coupled_interaction=False,
                     n_interaction=3,
                     n_heads=4,) # type: ignore
    readouts = [PairwiseReadout(dim_node_rep=64,
                                dim_edge_rep=64,
                                dim_output=1,
                                activation='silu',)] # type: ignore
    network = Cybertron(config=None,
                        embedding=embedding,
                        model=backbone,
                        readout=readouts,) # type: ignore

    return network(atom_type, atom_mask, bond_type, bond_mask, coordinate, dist_mask)

def create_batches(data_dict, batch_size, data_size):
    """Split a dataset dict into mini-batches.

    Args:
        data_dict: dict holding the network input arrays (keyed by
            ``input_keys`` below; a key may be missing or map to None),
            plus 'label' and 'num_atoms' arrays.
        batch_size: number of samples per batch.
        data_size: total number of samples (ceil division gives the batch
            count, so the last batch may be smaller).

    Returns:
        (out, out_label, out_num_atoms) where ``out`` is a list of
        per-batch input dicts and the other two are lists of per-batch
        label / atom-count arrays, all of length ``num_batch``.
    """
    num_batch = (data_size + batch_size - 1) // batch_size
    print("[Batch spliter] Create batch number:", num_batch)
    input_keys = ['atom_type', 'atom_mask', 'bond_type', 'bond_mask', 'coordinate', 'dist_mask']

    # Index by input_keys directly instead of zipping dict iteration order
    # against input_keys order: the original silently misaligned columns if
    # the dict's insertion order differed from input_keys (or if an extra
    # None-valued key was present).
    key_batches = {}
    for key in input_keys:
        value = data_dict.get(key)
        if value is None:
            key_batches[key] = [None] * num_batch
        else:
            key_batches[key] = batch_split(value, batch_size, drop_remainder=False)

    out = [{key: key_batches[key][i] for key in input_keys}
           for i in range(num_batch)]

    # drop_remainder=False keeps labels/num_atoms aligned with the inputs
    # above and with the ceil-division num_batch (the original relied on
    # batch_split's default here, which could drop the last partial batch).
    out_label = batch_split(data_dict['label'], batch_size, drop_remainder=False)
    out_num_atoms = batch_split(data_dict['num_atoms'], batch_size, drop_remainder=False)

    return out, out_label, out_num_atoms

if __name__ == "__main__":

    # We use vmap to vectorize the network, so that we can use batched data.
    # And hk.transform is used to convert haiku modules to one function.
    # vmap maps my_net over the leading (batch) axis of every input array;
    # hk.transform then yields a pure (init, apply) pair.
    # NOTE(review): apply_rng=True is the legacy haiku spelling; newer haiku
    # versions always thread an rng -- confirm against the pinned version.
    net = jax.vmap(my_net)
    net = hk.transform(net, apply_rng=True)

    # Loss function is defined here.
    # We use MSE loss here.
    @jit
    def loss_func(params, label, num_atoms, rng_key, **batch):
        """Atom-count-weighted MSE between prediction and label.

        Each sample's error is first divided by its atom count (per-atom
        error), then the squared errors are summed with weights
        proportional to num_atoms, normalized over the batch.
        """
        # use net.apply to calculate outputs; readout 0 is the scalar prediction
        outputs = net.apply(params, rng_key, **batch)
        pred = outputs[0].squeeze()
        label = label.squeeze()
        # per-atom difference -- assumes pred and label squeeze to the same shape
        diff = (pred - label) / num_atoms
        loss = jnp.square(diff)
        # per-sample weights sum to 1 over the batch
        weights = num_atoms / jnp.sum(num_atoms)
        return jnp.sum(loss * weights)

    # Load water ccsd data.
    # Paths are resolved relative to this script's directory (sys.path[0]);
    # the .npz archives must exist there.
    train_ds = dict(np.load(sys.path[0] + "/data_4/h4o2_train_sanitized_309500.npz"))
    valid_ds = dict(np.load(sys.path[0] + "/data_4/h4o2_valid_origin_48090.npz"))

    # set scale shift and eval RMSE metric
    # 'scale'/'shift' come from the training archive; scale_shift_fn is later
    # applied to both labels and outputs in eval_ -- presumably mapping the
    # normalized quantities back to original units (confirm against the
    # dataset-preparation code).
    scale = 1 / train_ds['scale']
    shift = train_ds['shift']
    scale_shift_fn = LinearScaleShift(scale, shift).calc

    @jit
    def eval_(params, label, num_atoms, rng_key, **batch):
        """Evaluate one batch.

        Returns:
            loss: atom-count-weighted MSE, same definition as loss_func.
            scaled_rmse: RMSE after mapping both prediction and label
                through scale_shift_fn (i.e. in un-normalized units).
        """
        output = net.apply(params, rng_key, **batch)[0].squeeze()
        label = label.squeeze()
        diff = (output - label) / num_atoms
        weights = num_atoms / jnp.sum(num_atoms)
        loss = jnp.sum(jnp.square(diff) * weights)

        # undo the label normalization before computing the reported RMSE
        scale_label = scale_shift_fn(label)
        scale_outputs = scale_shift_fn(output)
        scaled_diff = scale_outputs - scale_label
        scaled_rmse = jnp.sqrt(jnp.mean(jnp.square(scaled_diff)))

        return loss, scaled_rmse

    # Create bond mask
    # bond_mask is True where there is NO bond: it starts all-True and the
    # six bonded pairs below are set to False (symmetrically).
    # The pairs presumably encode two waters: atoms 0/1 the oxygens, 2-5 the
    # hydrogens, plus the two H-H pairs within each molecule -- confirm
    # against the dataset's atom ordering.
    bond_idx = [[0, 2], [0, 3], [1, 4], [1, 5], [2, 3], [4, 5]]
    bond_mask = np.ones((1, 6, 6), np.bool_)
    for k in bond_idx:
        bond_mask[0, k[0], k[1]] = False
        bond_mask[0, k[1], k[0]] = False
    print("Bond mask is:\n", bond_mask)

    # Processing train dataset and valid dataset.
    # number of conformations in each split (leading axis of 'coordinate')
    size1 = train_ds['coordinate'].shape[0]
    size2 = valid_ds['coordinate'].shape[0]

    # Rebuild the dicts with only the keys the network consumes (plus
    # 'label'): atom_type and bond_mask are broadcast (tiled) to one row per
    # conformation; keys absent from the archive become None via .get().
    train_ds = {'atom_type': np.tile(train_ds['atom_type'], (size1, 1)),
                'atom_mask': train_ds.get('atom_mask'),
                'bond_type': train_ds.get('bond_type'),
                'bond_mask': np.tile(bond_mask, (size1, 1, 1)),
                'coordinate': train_ds['coordinate'],
                'dist_mask': train_ds.get('dist_mask'),
                'label': train_ds['label'],}
    
    print("The shape of inputs in train dataset is:")
    for k, v in train_ds.items():
        if v is None:
            pass
        else:
            print(k, v.shape)
    
    # num_atoms should be caculated and passed to the loss function.
    # weights in loss function is derived from num_atoms.
    # atom_type > 0 marks real atoms (0 is padding), so the row sum is the
    # per-conformation atom count.
    atom_mask = train_ds['atom_type'] > 0
    num_atoms = jnp.sum(atom_mask, axis=-1)
    train_ds['num_atoms'] = num_atoms

    # Same processing for the validation split.
    valid_ds = {'atom_type': np.tile(valid_ds['atom_type'], (size2, 1)),
                'atom_mask': valid_ds.get('atom_mask'),
                'bond_type': valid_ds.get('bond_type'),
                'bond_mask': np.tile(bond_mask, (size2, 1, 1)),
                'coordinate': valid_ds['coordinate'],
                'dist_mask': valid_ds.get('dist_mask'),
                'label': valid_ds['label'],}
    
    atom_mask = valid_ds['atom_type'] > 0
    num_atoms = jnp.sum(atom_mask, axis=-1)
    valid_ds['num_atoms'] = num_atoms
    
    # split the batch_ds into mini batches
    batch_size = 32
    train_data_size = train_ds['num_atoms'].shape[0] # other values can also be used to get data_size
    train_ds, train_label, train_num_atoms = create_batches(train_ds, batch_size, train_data_size)
    valid_data_size = valid_ds['num_atoms'].shape[0]
    valid_ds, valid_label, valid_num_atoms = create_batches(valid_ds, batch_size, valid_data_size)

    # Initialize solver: optimizer and loss function passed here.
    dim_feature = 64  # must match dim_feature used inside my_net
    # learning-rate schedule keyed on step count -- presumably the
    # transformer warmup/decay schedule (see transformer_lr in utils.train)
    lr = transformer_lr(learning_rate=2e-1, warmup_steps=10000, dimension=dim_feature)
    solver = OptaxSolver(opt=optax.adam(learning_rate=lr), fun=loss_func)

    # using the net.init method to initialize the network and get net parameters.
    # Note: the PRNGSequence draw order matters for reproducibility.
    rng_seq = hk.PRNGSequence(114514)
    params = net.init(next(rng_seq), **train_ds[0])
    state = solver.init_state(params, train_label[0], 
                              train_num_atoms[0], next(rng_seq), **train_ds[0])
    print_net_params(params)

    # save net config file
    # Results directory next to this script; checkpoints are written here.
    output_dir = sys.path[0] + "/results_4/"
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(output_dir, exist_ok=True)
    # with open(output_dir + "net_config.pkl", "wb") as f:
    #     pickle.dump(NET_CONFIG.value, f)

    # training loop
    import copy
    init_param = copy.deepcopy(params)  # keep a pristine copy of the initial weights
    epoch_number = 16
    call_back_step = 16  # unused: the per-step callback below is commented out
    min_loss = 1e3  # lowest averaged validation RMSE seen so far

    print("Start training...")
    beg_time = time.time()
    for epoch in range(epoch_number):

        total_loss = 0.0
        for step, batch in enumerate(train_ds):

            label_ = train_label[step]
            num_atoms_ = train_num_atoms[step]

            params, state = solver.update(params=params, 
                                          state=state,
                                          num_atoms=num_atoms_,
                                          rng_key=next(rng_seq),
                                          label=label_,
                                          **batch)
            
            # Extra forward pass so the reported loss reflects the
            # freshly-updated params (solver state holds the pre-update value).
            train_loss = loss_func(params, label_, num_atoms_, next(rng_seq), **batch)
            total_loss += train_loss

            # if (step + 1) % 512 == 0:
            #     print(f"Call back at step {step + 1}, train loss is {train_loss:.4e}.")
            
        ## callback per epoch: evaluate over the full validation set
        total_eval_loss = 0.0
        total_eval_rmse = 0.0
        for eval_step, eval_batch in enumerate(valid_ds):

            eval_label_ = valid_label[eval_step]
            eval_num_atoms_ = valid_num_atoms[eval_step]

            eval_loss, eval_rmse = eval_(params, eval_label_, eval_num_atoms_, next(rng_seq), **eval_batch)
            total_eval_loss += eval_loss
            total_eval_rmse += eval_rmse

        # averages over validation batches / training steps this epoch
        total_eval_loss = total_eval_loss / (eval_step + 1)
        total_eval_rmse = total_eval_rmse / (eval_step + 1)
        avg_loss = total_loss / (step + 1)
        step_now = (epoch + 1) * ((train_data_size + batch_size - 1) // batch_size)

        print(f"Epoch {epoch + 1}, Step {step_now}, Learning rate: {lr(step_now):.3e}, ", end="")
        print(f"Last loss: {train_loss:.4e}, Avg loss: {avg_loss:.4e}, ", end="")
        print(f"Eval loss: {total_eval_loss:.4e}, Energy_RMSE: {total_eval_rmse:.3e}", end="\n")
        if total_eval_rmse < min_loss:
            # BUG FIX: track the quantity actually being compared. The
            # original assigned eval_loss (last validation batch's loss, a
            # different metric entirely), so the best-checkpoint threshold
            # drifted and improvements could be missed or overwritten.
            min_loss = total_eval_rmse
            with open(output_dir + "params_best.pkl", "wb") as f:
                pickle.dump(params, f)
    
    end_time = time.time()
    used_time = end_time - beg_time
    m, s = divmod(used_time, 60)
    h, m = divmod(m, 60)
    print("Training Finished!")  # typo fix: was "Fininshed"
    print("Training Time: %02d:%02d:%02d" % (h, m, s))