# Example 1: first things first.

import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))

import jax
import optax
import numpy as np
import jax.numpy as jnp
import haiku as hk
import jaxopt as opt

from jax import Array, jit
from jaxopt import OptaxSolver
from typing import Optional, Union, List

from cybertron.embedding import ConformationEmbedding
from cybertron.model import MolCT
from cybertron.readout import AtomwiseReadout
from cybertron.utils.train import print_net_params
from cybertron import Cybertron

# The user-defined network is built here.
# We follow a similar approach to tutorial_c02.py for the MindSpore Cybertron.
# The difference is that here the net must be defined in a FUNCTION!
def my_net(atom_type, 
           atom_mask, 
           bond_type, 
           bond_mask, 
           coordinate, 
           dist_mask,):
    """Construct the Cybertron network and apply it to one conformation.

    The modules are created inside a function so the whole thing can be
    wrapped by `jax.vmap` and `hk.transform` at module level; this function
    therefore operates on a single (unbatched) sample.

    Args (per-sample; exact shapes/dtypes are defined by the cybertron
    package and are not visible in this file — confirm against its docs):
        atom_type: atom type indices.
        atom_mask: mask of valid atoms.
        bond_type: bond type indices.
        bond_mask: mask of valid bonds.
        coordinate: atomic coordinates.
        dist_mask: mask over interatomic distances.

    Returns:
        Whatever `Cybertron.__call__` returns; the loss below reads
        element [0] of it.
    """

    # NOTE(review): 'rbf_runc' looks like a typo for 'rbf_func' — confirm
    # against ConformationEmbedding's signature (a typo'd keyword would
    # raise a TypeError, or be silently ignored if **kwargs is accepted).
    emb = ConformationEmbedding(dim_node=128,
                                dim_edge=128,
                                rbf_runc='log_gaussian') # type: ignore
    model = MolCT(dim_feature=128, 
                  dim_edge_emb=128, 
                  dim_node_emb=128,) # type: ignore
    # Readout is passed as a list; Cybertron apparently accepts multiple
    # readout heads (only one is used here).
    readout = [AtomwiseReadout(dim_node_rep=128,
                               activation='silu',)] # type: ignore
    cbt = Cybertron(config=None, 
                    embedding=emb,
                    model=model,
                    readout=readout,) # type: ignore
    
    return cbt(atom_type, atom_mask, bond_type, bond_mask, coordinate, dist_mask)

# Vectorize the per-sample network with jax.vmap so it accepts batched
# inputs, then hk.transform converts the Haiku-module-building function
# into a pure (init, apply) pair.  Order matters: vmap is applied to the
# raw function first, then the result is transformed.
# NOTE(review): `apply_rng=True` is the default (and deprecated as an
# explicit argument) in recent Haiku versions — confirm the pinned version.
net = jax.vmap(my_net)
net = hk.transform(net, apply_rng=True)

# Size-weighted mean-absolute-error loss.
@jit
def loss_func(params, label, num_atoms, rng_key, **batch):
    """Return the atom-count-weighted MAE of the network on one batch.

    Each molecule's absolute error is first normalized by its atom count,
    then the per-molecule errors are re-weighted by atom count so that the
    result equals sum(|pred - label|) / sum(num_atoms) over the batch.

    Args:
        params: Haiku parameter tree for `net`.
        label: per-molecule regression targets.
        num_atoms: number of (real, non-padding) atoms per molecule.
        rng_key: PRNG key forwarded to `net.apply`.
        **batch: batched network inputs (atom_type, coordinate, ...).
    """
    predictions = net.apply(params, rng_key, **batch)
    per_mol_error = jnp.abs((predictions[0].squeeze() - label) / num_atoms)
    per_mol_weight = num_atoms / jnp.sum(num_atoms)
    return jnp.sum(per_mol_error * per_mol_weight)

def create_batches(data_dict, label, batch_size, data_size):
    """Split a dataset dict and its labels into mini-batches.

    Args:
        data_dict: mapping from field name to a full-dataset array whose
            first axis is the sample axis, or None for an absent field.
        label: array of labels, first axis is the sample axis.
        batch_size: target number of samples per mini-batch.
        data_size: total number of samples; data_size // batch_size
            batches are produced (np.array_split tolerates uneven splits).

    Returns:
        A tuple (batches, batch_labels): `batches` is a list of dicts, one
        per mini-batch, with the same keys as `data_dict` (None fields stay
        None in every batch); `batch_labels` is the matching list of label
        arrays.
    """
    num_batch = data_size // batch_size

    # Split each field once, keyed by name.  This replaces the original
    # index-aligned parallel lists, which silently depended on dict
    # iteration order matching between two separate loops.
    split = {
        key: ([None] * num_batch if value is None
              else np.array_split(value, num_batch))
        for key, value in data_dict.items()
    }

    out = [{key: split[key][i] for key in data_dict} for i in range(num_batch)]
    out_label = np.array_split(label, num_batch)

    return out, out_label

def create_learning_rate_fn(learning_rate: float,
                            batch_size: int,
                            steps_per_epoch: int,
                            warmup_epochs: int,
                            num_epochs: int,
                            ):
    """Build a linear-warmup + cosine-decay learning-rate schedule.

    The base rate is scaled linearly with batch size relative to a
    reference batch of 256.  The schedule ramps from 0 to the scaled rate
    over `warmup_epochs`, then follows a cosine decay for the remaining
    epochs (at least one epoch of decay is always scheduled).

    Returns:
        An optax schedule function mapping step count -> learning rate.
    """
    scaled_lr = learning_rate * batch_size / 256.
    warmup_steps = warmup_epochs * steps_per_epoch

    linear_warmup = optax.linear_schedule(
        init_value=0.,
        end_value=scaled_lr,
        transition_steps=warmup_steps)

    decay_epochs = max(num_epochs - warmup_epochs, 1)
    cosine_decay = optax.cosine_decay_schedule(
        init_value=scaled_lr,
        decay_steps=decay_epochs * steps_per_epoch)

    return optax.join_schedules(
        schedules=[linear_warmup, cosine_decay],
        boundaries=[warmup_steps])

if __name__ == "__main__":

    # Solver setup: warmup+cosine LR schedule feeding an Adam optimizer,
    # wrapped by jaxopt's OptaxSolver around the MAE loss above.
    learning_rate = create_learning_rate_fn(1e-3, 32, 1024 // 32, 2, 8)
    solver = OptaxSolver(opt=optax.adam(learning_rate=learning_rate), fun=loss_func)

    # Load the pre-normalized QM9 training set from next to this script.
    train_ds = np.load(sys.path[0] + "/dataset_qm9_normed_trainset_1024.npz")
    train_ds = dict(train_ds)

    # Column 7 of the label matrix is the regression target used here.
    label = train_ds['label'][:, 7]
    # .get() returns None for fields absent from the npz; create_batches
    # carries None fields through unchanged.
    batch_ds = {'atom_type': train_ds.get('atom_type'),
                'atom_mask': train_ds.get('atom_mask'),
                'bond_type': train_ds.get('bond_type'),
                'bond_mask': train_ds.get('bond_mask'),
                'coordinate': train_ds.get('coordinate'),
                'dist_mask': train_ds.get('dist_mask'),}

    # Per-molecule atom counts for the size-weighted loss; the > 0 mask
    # presumably treats atom type 0 as padding — confirm against the dataset.
    atom_mask = batch_ds['atom_type'] > 0 # type: ignore
    num_atoms = jnp.sum(atom_mask, axis=-1) # type: ignore

    # Split the full dataset (and the atom counts) into mini-batches.
    batch_size = 32
    data_size = 1024
    batch_ds, label = create_batches(batch_ds, label, batch_size, data_size)
    num_atoms = np.array_split(num_atoms, data_size // batch_size)

    # net.init initializes the network parameters from the first batch;
    # solver.init_state builds the optimizer state for those parameters.
    rng_seq = hk.PRNGSequence(114514)
    params = net.init(next(rng_seq), **batch_ds[0])
    state = solver.init_state(params, label[0], num_atoms[0], next(rng_seq), **batch_ds[0])
    print_net_params(params)

    # Training loop.  (The unused `learning_rates = []` accumulator from the
    # original has been removed.)
    epoch_number = 32
    call_back_step = 16
    for epoch in range(epoch_number):
        for step, batch in enumerate(batch_ds):
            params, state = solver.update(params=params,
                                          state=state,
                                          num_atoms=num_atoms[step],
                                          rng_key=next(rng_seq),
                                          label=label[step],
                                          **batch)

            # Periodic callback: report the current training MAE.
            if (step + 1) % call_back_step == 0:
                print(f"Epoch {epoch} step {step}, MAE loss now: ")
                print(loss_func(params, label[step], num_atoms[step], next(rng_seq), **batch))  # TODO(Liyh): callback methods, save ckpt/eval