# Example 2: 
# 1.Using ml-collections to define a model.
# 2.Evaluation in training loop.
# 3.Save model config and parameters.

import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))

import jax
import time
import optax
import pickle
import numpy as np
import jax.numpy as jnp
import haiku as hk
import jaxopt as opt

from absl import app
from absl import flags
from jax import Array, jit
from jaxopt import OptaxSolver
from typing import Optional, Union, List
from ml_collections import config_flags

from cybertron.embedding import ConformationEmbedding
from cybertron.model import MolCT
from cybertron.readout import AtomwiseReadout
from cybertron import Cybertron
from cybertron.utils.train import print_net_params

# Network architecture is supplied via an ml-collections config file
# (--net_config=<path>); the remaining flags control the training loop.
NET_CONFIG = config_flags.DEFINE_config_file('net_config')
flags.DEFINE_integer('batch_size', 32, 'Batch size.')
flags.DEFINE_integer('epoch_number', 8, 'Number of epochs.')
flags.DEFINE_integer('seed', 114514, 'Random seed.')
flags.DEFINE_integer('call_back_step', 16, 'Number of steps between callbacks.')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')

FLAGS = flags.FLAGS

# The user-defined network is assembled here.
# We follow an approach similar to tutorial_c02.py from the mindspore version
# of cybertron. The difference is that the net must be defined in a FUNCTION
# (so it can be transformed by haiku)!
def my_net(atom_type, 
           atom_mask, 
           bond_type, 
           bond_mask, 
           coordinate, 
           dist_mask,):
    """Build a Cybertron network from NET_CONFIG and run one forward pass.

    Must be a plain function so it can be wrapped by hk.transform; the
    embedding / model / readout sub-modules are configured entirely from
    the --net_config ml-collections file.
    """
    cfg = NET_CONFIG.value
    network = Cybertron(
        config=None,
        embedding=ConformationEmbedding(**cfg.embedding),  # type: ignore
        model=MolCT(**cfg.model),                          # type: ignore
        readout=[AtomwiseReadout(**cfg.readout)],          # type: ignore
    )  # type: ignore
    return network(atom_type, atom_mask, bond_type, bond_mask, coordinate, dist_mask)

def create_batches(data_dict, batch_size, data_size):
    """Split a dataset dict into mini-batches.

    Args:
        data_dict: mapping holding the network input arrays ('atom_type',
            'atom_mask', 'bond_type', 'bond_mask', 'coordinate',
            'dist_mask'; any of these may be None) plus 'label' and
            'num_atoms' arrays, all sharing the same leading dimension.
        batch_size: target number of samples per batch.
        data_size: total number of samples (leading dimension length).

    Returns:
        A tuple (batches, labels, num_atoms) where `batches` is a list of
        dicts (one per mini-batch) with the network input keys, and
        `labels` / `num_atoms` are lists of per-batch arrays aligned
        index-for-index with `batches`.
    """
    num_batch = data_size // batch_size
    _keys = ['atom_type', 'atom_mask', 'bond_type', 'bond_mask',
             'coordinate', 'dist_mask']

    # Index data_dict by key instead of relying on dict insertion order:
    # the previous version iterated data_dict.items() and assumed the first
    # six entries lined up with _keys, which silently produced wrong batches
    # if the caller built the dict in a different order (the dict also
    # contains 'label' and 'num_atoms').
    split = {}
    for key in _keys:
        value = data_dict[key]
        if value is None:
            split[key] = [None] * num_batch
        else:
            split[key] = np.array_split(value, num_batch)

    out = [{key: split[key][i] for key in _keys} for i in range(num_batch)]

    out_label = np.array_split(data_dict['label'], num_batch)
    out_num_atoms = np.array_split(data_dict['num_atoms'], num_batch)

    return out, out_label, out_num_atoms

def main(_argv):
    """Train a Cybertron model on QM9, evaluating and checkpointing periodically.

    Loads the normed QM9 train/valid sets from the script directory, trains
    with Adam for FLAGS.epoch_number epochs, evaluates on the first valid
    batch every FLAGS.call_back_step steps, and saves the net config plus
    the best parameters (by eval MAE) under <script_dir>/results_2/.
    """

    def prepare_dataset(raw):
        """Assemble network inputs, the target label, and per-sample atom counts."""
        ds = {'atom_type': raw['atom_type'],
              'atom_mask': raw.get('atom_mask'),
              'bond_type': raw.get('bond_type'),
              'bond_mask': raw.get('bond_mask'),
              'coordinate': raw['coordinate'],
              'dist_mask': raw.get('dist_mask'),
              'label': raw['label'][:, 7],}
        # num_atoms must be calculated and passed to the loss function:
        # the per-sample weights in the loss are derived from it.
        ds['num_atoms'] = jnp.sum(ds['atom_type'] > 0, axis=-1)
        return ds

    # vmap vectorizes the per-sample network over the batch dimension, and
    # hk.transform converts the haiku modules into a pure (init, apply) pair.
    net = jax.vmap(my_net)
    net = hk.transform(net, apply_rng=True)

    # MAE loss, weighted per molecule by its number of atoms.
    @jit
    def loss_func(params, label, num_atoms, rng_key, **batch):
        # net.apply runs the forward pass; outputs[0] holds the prediction.
        outputs = net.apply(params, rng_key, **batch)
        diff = (outputs[0].squeeze() - label) / num_atoms
        loss = jnp.abs(diff)
        weights = num_atoms / jnp.sum(num_atoms)
        return jnp.sum(loss * weights)

    # Solver couples the optimizer with the loss function.
    solver = OptaxSolver(opt=optax.adam(learning_rate=FLAGS.learning_rate), fun=loss_func)

    # Load QM9 data (from the mindspore cybertron tutorials).
    train_ds = prepare_dataset(dict(np.load(sys.path[0] + "/dataset_qm9_normed_trainset_1024.npz")))
    valid_ds = prepare_dataset(dict(np.load(sys.path[0] + "/dataset_qm9_normed_validset_128.npz")))

    # Split each dataset into mini-batches. NOTE: each set is split by its
    # OWN size — the previous version passed the train-set size when
    # splitting the valid set, which produced valid batches of the wrong size.
    batch_size = FLAGS.batch_size
    train_size = train_ds['num_atoms'].shape[0]
    valid_size = valid_ds['num_atoms'].shape[0]
    train_ds, train_label, train_num_atoms = create_batches(train_ds, batch_size, train_size)
    valid_ds, valid_label, valid_num_atoms = create_batches(valid_ds, batch_size, valid_size)

    # net.init initializes the network and returns the net parameters.
    rng_seq = hk.PRNGSequence(FLAGS.seed)
    params = net.init(next(rng_seq), **train_ds[0])
    state = solver.init_state(params, train_label[0],
                              train_num_atoms[0], next(rng_seq), **train_ds[0])
    print_net_params(params)

    # Save the net config so the trained parameters can be reloaded later.
    output_dir = sys.path[0] + "/results_2/"
    os.makedirs(output_dir, exist_ok=True)
    with open(output_dir + "net_config.pkl", "wb") as f:
        pickle.dump(NET_CONFIG.value, f)

    # Jit the update step once, outside the loop: wrapping solver.update
    # with jit() on every iteration creates a fresh traced callable each
    # step and defeats jax's compilation cache.
    update = jit(solver.update)

    # Training loop.
    epoch_number = FLAGS.epoch_number
    call_back_step = FLAGS.call_back_step
    # Start from +inf so the first evaluation always checkpoints
    # (a finite initial threshold could skip it).
    min_loss = float('inf')
    print("Start training...")
    beg_time = time.time()
    for epoch in range(epoch_number):
        for step, batch in enumerate(train_ds):
            params, state = update(params=params,
                                   state=state,
                                   num_atoms=train_num_atoms[step],
                                   rng_key=next(rng_seq),
                                   label=train_label[step],
                                   **batch)

            # Callback: evaluate on the first valid batch and checkpoint
            # whenever the eval loss improves.
            if (step + 1) % call_back_step == 0:
                eval_loss = loss_func(params, valid_label[0],
                                      valid_num_atoms[0], next(rng_seq), **valid_ds[0])
                print(f"Epoch {epoch + 1} step {step + 1}, eval MAE loss now: {eval_loss:.4f}")
                if eval_loss < min_loss:
                    min_loss = eval_loss
                    with open(output_dir + "params.pkl", "wb") as f:
                        pickle.dump(params, f)

    used_time = time.time() - beg_time
    m, s = divmod(used_time, 60)
    h, m = divmod(m, 60)
    print("Training Finished!")
    print("Training Time: %02d:%02d:%02d" % (h, m, s))
                    
if __name__ == "__main__":
    app.run(main)