# Main program.

import jax
import optax
import numpy as np
import jax.numpy as jnp
import haiku as hk
import jaxopt as opt

from jax import Array, jit
from jaxopt import OptaxSolver
from typing import Optional, Union, List

from cybertron.embedding import ConformationEmbedding
from cybertron.model import MolCT
from cybertron.readout import AtomwiseReadout
from cybertron import Cybertron

# The user-defined network is assembled here.
# This mirrors the approach of tutorial_c02.py for the MindSpore Cybertron;
# the difference is that with haiku the net must be defined inside a FUNCTION!
def my_net(atom_type,
           atom_mask,
           bond_type,
           bond_mask,
           coordinate,
           dist_mask):
    """Build a small Cybertron network and run one forward pass.

    Haiku requires that all module construction happens inside the
    function later wrapped by ``hk.transform``, so the embedding, the
    interaction model, the readout, and the full network are all
    created here on every trace.
    """
    embedding = ConformationEmbedding(dim_node=16, dim_edge=16)  # type: ignore
    interaction = MolCT(dim_feature=16,
                        dim_edge_emb=16,
                        dim_node_emb=16)  # type: ignore
    readouts = [AtomwiseReadout(dim_node_rep=16, activation='silu')]  # type: ignore
    network = Cybertron(config=None,
                        embedding=embedding,
                        model=interaction,
                        readout=readouts)  # type: ignore

    ## Liyh: ml-collections
    return network(atom_type, atom_mask, bond_type, bond_mask,
                   coordinate, dist_mask)

# We use vmap to vectorize the network, so that we can use batched data.
# And hk.transform is used to convert haiku modules to one pure
# (init, apply) pair of functions.
net = jax.vmap(my_net)
# NOTE(review): `apply_rng=True` has been the default (and the flag
# deprecated) in haiku for a long time, so it is omitted here; the
# transformed `apply` still takes an rng key as its second argument.
net = hk.transform(net)

# We use MSE loss here.
# We use MSE loss here.
@jit
def loss_func(params, label, rng_key, **batch):
    """Return the mean-squared error between the prediction and `label`."""
    # Forward pass through the transformed network; the readout list
    # yields a tuple, of which the first entry is the prediction.
    prediction = net.apply(params, rng_key, **batch)[0]
    residual = prediction - label
    return jnp.mean(jnp.square(residual))

if __name__ == "__main__":

    # Initialize solver: optimizer and loss function passed here.
    solver = OptaxSolver(opt=optax.adam(learning_rate=1e-3), fun=loss_func)

    # Load the training set; dict() materializes the lazy NpzFile.
    train_ds = dict(np.load("module_test/test_qm9_1024.npz"))

    # Direct indexing (instead of .get) fails fast with a KeyError when a
    # field is missing from the archive, rather than silently passing
    # None on to net.init and producing a confusing downstream error.
    label = train_ds['label']
    feature_names = ('atom_type', 'atom_mask', 'bond_type',
                     'bond_mask', 'coordinate', 'dist_mask')
    batch_ds = {name: train_ds[name] for name in feature_names}  ## Liyh: batch split

    # Use net.init to initialize the network and get the net parameters.
    rng_seq = hk.PRNGSequence(114514)
    params = net.init(next(rng_seq), **batch_ds)  ## Liyh: print info
    state = solver.init_state(params, label, next(rng_seq), **batch_ds)

    # Training loop: each solver.update performs one optimizer step on
    # loss_func; the loss is re-evaluated afterwards for reporting.
    for step in range(100):
        params, state = solver.update(params=params, state=state,
                                      rng_key=next(rng_seq),
                                      label=label,
                                      **batch_ds)
        loss = loss_func(params, label, next(rng_seq), **batch_ds)
        print(f"step {step:3d} loss: {loss}")  ## Liyh: callback methods, save ckpt/eval
        