# Example 3: Using pmap for training

import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))

import jax
import time
import optax
import pickle
import functools
import numpy as np
import jax.numpy as jnp
import haiku as hk
import jaxopt as opt

from absl import app
from absl import flags
from jax import Array, jit, pmap
from jaxopt import OptaxSolver
from typing import Optional, Union, List, Callable, Any
from ml_collections import config_flags

from flax import jax_utils
from cybertron.embedding import ConformationEmbedding
from cybertron.model import MolCT
from cybertron.readout import AtomwiseReadout
from cybertron import Cybertron
from cybertron.utils.train import print_net_params, shard_array

# The user-defined network is built here.
# This mirrors the approach of tutorial_c02.py for the MindSpore version of
# Cybertron; the difference is that here the net must be defined in a FUNCTION!
def my_net(atom_type, 
           atom_mask, 
           bond_type, 
           bond_mask, 
           coordinate, 
           dist_mask,):
    """Assemble the Cybertron model and run one forward pass.

    Haiku modules may only be instantiated inside a function that is later
    passed to ``hk.transform``, so the embedding, the interaction model and
    the readout are all constructed here and applied immediately.
    """
    # NOTE(review): the keyword 'rbf_runc' looks like a typo for 'rbf_func'
    # -- verify against ConformationEmbedding's signature.
    embedding = ConformationEmbedding(dim_node=128,
                                      dim_edge=128,
                                      rbf_runc='log_gaussian')  # type: ignore
    interaction = MolCT(dim_feature=128,
                        dim_edge_emb=128,
                        dim_node_emb=128)  # type: ignore
    readouts = [AtomwiseReadout(dim_node_rep=128,
                                activation='silu')]  # type: ignore
    network = Cybertron(config=None,
                        embedding=embedding,
                        model=interaction,
                        readout=readouts)  # type: ignore
    return network(atom_type, atom_mask, bond_type, bond_mask,
                   coordinate, dist_mask)

# We use vmap to vectorize the network, so that we can use batched data.
# And hk.transform is used to convert haiku modules to one function.
# NOTE(review): module construction happens inside a jax.vmap'd function
# here; Haiku's docs usually recommend hk.vmap for code that creates
# modules -- confirm this works with the installed haiku version.
net = jax.vmap(my_net, axis_name="mini_batch")
net = hk.transform(net, apply_rng=True)

# We use MAE loss here.
@jit
def loss_func(params, label, num_atoms, rng_key, **batch):
    """Atom-count-weighted MAE between network output and label.

    Each molecule's absolute error is first normalized by its atom count,
    then re-weighted by that count relative to the batch total, so the
    result is the batch MAE normalized by the total number of atoms.
    """
    # Forward pass through the transformed network.
    preds = net.apply(params, rng_key, **batch)
    per_mol_err = jnp.abs((preds[0].squeeze() - label) / num_atoms)
    atom_weights = num_atoms / jnp.sum(num_atoms)
    return jnp.sum(per_mol_err * atom_weights)

def psum(fun: Callable[..., Any], axis_name: str = 'device_batch') -> Callable[..., Any]:
    """Apply `jax.lax.psum` across `axis_name` to every output of `fun`.

    Wraps `fun` so that each non-None leaf of its (pytree) return value is
    summed across the devices participating in the named mapped axis.
    Intended to be used under `jax.pmap(..., axis_name=axis_name)`.

    Args:
        fun: Function whose outputs should be summed across devices.
        axis_name: Name of the mapped axis to reduce over.

    Returns:
        A wrapped function with the same call signature as `fun`.
    """
    # None leaves are passed through untouched (lax.psum cannot reduce None).
    maybe_psum = lambda t: jax.lax.psum(t, axis_name) if t is not None else t

    # functools.wraps copies fun's name/docstring onto the wrapper so the
    # wrapped function stays introspectable.
    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
        # jax.tree_map is deprecated in recent JAX releases;
        # jax.tree_util.tree_map is the stable spelling and is equivalent.
        return jax.tree_util.tree_map(maybe_psum, fun(*args, **kwargs))
    return wrapper

def create_batches(data_dict, label, batch_size, data_size):
    """Split a dict of per-sample arrays and the labels into mini-batches.

    ``None`` values in ``data_dict`` are carried through unchanged: every
    batch gets ``None`` for that key. Returns a list of batch dicts and a
    matching list of label chunks (``data_size // batch_size`` of each).
    """
    num_batch = data_size // batch_size

    # Per-key list of num_batch chunks; None keys expand to None chunks.
    chunks = {
        k: ([None] * num_batch if v is None else np.array_split(v, num_batch))
        for k, v in data_dict.items()
    }

    # Transpose: one dict per batch index, preserving the original key order.
    out = [{k: parts[i] for k, parts in chunks.items()}
           for i in range(num_batch)]

    out_label = np.array_split(label, num_batch)

    return out, out_label

def create_learning_rate_fn(learning_rate: float,
                            batch_size: int,
                            steps_per_epoch: int,
                            warmup_epochs: int,
                            num_epochs: int,
                            ):
    """Create a linear-warmup + cosine-decay learning-rate schedule.

    The base rate is scaled linearly with the batch size relative to a
    reference batch of 256. The schedule ramps from 0 to the base rate over
    ``warmup_epochs`` and then follows a cosine decay for the remainder.
    """
    base_learning_rate = learning_rate * batch_size / 256.
    warmup_steps = warmup_epochs * steps_per_epoch

    warmup_fn = optax.linear_schedule(init_value=0.,
                                      end_value=base_learning_rate,
                                      transition_steps=warmup_steps)

    # Guard against num_epochs <= warmup_epochs producing a zero-length decay.
    decay_epochs = max(num_epochs - warmup_epochs, 1)
    cosine_fn = optax.cosine_decay_schedule(
        init_value=base_learning_rate,
        decay_steps=decay_epochs * steps_per_epoch)

    return optax.join_schedules(schedules=[warmup_fn, cosine_fn],
                                boundaries=[warmup_steps])

if __name__ == "__main__":

    # ---- Data loading --------------------------------------------------
    # load datasets (a pre-normalized 1024-sample QM9 file stored next to
    # this script)
    train_ds = np.load(sys.path[0] + "/dataset_qm9_normed_trainset_1024.npz")
    train_ds = dict(train_ds)

    # Column 7 of the label matrix is the regression target.
    # NOTE(review): presumably one specific QM9 property -- confirm which.
    label = train_ds['label'][:, 7]
    batch_ds = {'atom_type': train_ds.get('atom_type'),
                'atom_mask': train_ds.get('atom_mask'),
                'bond_type': train_ds.get('bond_type'),
                'bond_mask': train_ds.get('bond_mask'),
                'coordinate': train_ds.get('coordinate'),
                'dist_mask': train_ds.get('dist_mask'),}
    
    # Atom types are positive integers with 0 as padding, so counting the
    # positive entries gives the per-molecule atom count used by the loss.
    atom_mask = batch_ds['atom_type'] > 0 # type: ignore
    num_atoms = jnp.sum(atom_mask, axis=-1) # type: ignore
    
    # split the batch_ds into mini batches
    batch_size = 32
    data_size = 1024
    batch_ds, label = create_batches(batch_ds, label, batch_size, data_size)
    num_atoms = np.array_split(num_atoms, data_size // batch_size)

    # Initialize solver: optimizer and loss function passed here.
    # The loss is wrapped in psum so per-device losses/gradients are summed
    # across the 'device_batch' pmap axis.
    learning_rate = create_learning_rate_fn(1e-3, 32, 1024 // 32, 2, 8)
    pmap_loss_func = psum(loss_func, axis_name='device_batch')
    solver = OptaxSolver(opt=optax.adam(learning_rate=learning_rate), fun=pmap_loss_func)

    # using the net.init method to initialize the network and get net parameters.
    rng_seq = hk.PRNGSequence(114514)
    params = net.init(next(rng_seq), **batch_ds[0])
    # params = jax.tree_map(jax_utils.replicate, params)
    
    print_net_params(params)

    # Solver state is created on the host with the plain (un-pmapped) loss
    # and replicated to the devices below; the commented block is the
    # alternative fully-pmapped initialization.
    # init_label = jax_utils.replicate(label[0])
    # init_num_atoms = jax_utils.replicate(num_atoms[0])
    # init_ds = jax.tree_map(shard_array, batch_ds[0])
    # init_key = jax_utils.replicate(next(rng_seq))
    # state = pmap(solver.init_state, axis_name='device_batch')(params, init_label, init_num_atoms, init_key, **init_ds)

    init_label = label[0]
    init_num_atoms = num_atoms[0]
    init_ds = batch_ds[0]
    init_key = next(rng_seq)
    
    _solver = OptaxSolver(opt=optax.adam(learning_rate=learning_rate), fun=loss_func)
    state = _solver.init_state(params, init_label, init_num_atoms, init_key, **init_ds)

    # Replicate parameters and optimizer state across local devices for pmap.
    params = jax_utils.replicate(params)
    state = jax_utils.replicate(state)
    # training loop
    epoch_number = 8
    call_back_step = 16

    for epoch in range(epoch_number):
        for step, batch in enumerate(batch_ds):
            # Shard each mini-batch along its leading axis across devices;
            # the RNG key is replicated (same key on every device).
            # NOTE(review): jax.tree_map is deprecated in newer JAX;
            # jax.tree_util.tree_map is the stable name.
            _num_atoms = shard_array(num_atoms[step])
            _label = shard_array(label[step])
            _batch = jax.tree_map(shard_array, batch)
            _key = jax_utils.replicate(next(rng_seq))
            # NOTE(review): pmap is re-applied to solver.update on every
            # step; hoisting pmap(solver.update, ...) out of the loop would
            # avoid re-wrapping -- verify tracing/caching behavior.
            params, state = pmap(solver.update, axis_name='device_batch')(params=params, 
                                                                          state=state,
                                                                          num_atoms=_num_atoms,
                                                                          rng_key=_key,
                                                                          label=_label,
                                                                          **_batch)

            # Periodically report the loss on the current (host-side) batch
            # using the un-replicated parameters.
            if (step + 1) % call_back_step == 0:
                host_params = jax_utils.unreplicate(params)
                print(f"Epoch {epoch + 1} step {step + 1}, MAE loss now: ")
                print(loss_func(host_params, label[step], num_atoms[step], next(rng_seq), **batch))