"""
Title: Message-passing neural network (MPNN) for molecular property prediction
Author: [akensert](http://github.com/akensert)
Date created: 2021/08/16
Last modified: 2021/12/27
Description: Implementation of an MPNN to predict blood-brain barrier permeability.
"""

"""
## Introduction

In this tutorial, we will implement a type of graph neural network (GNN) known as
_message passing neural network_ (MPNN) to predict graph properties. Specifically, we will
implement an MPNN to predict a molecular property known as
_blood-brain barrier permeability_ (BBBP).

Motivation: as molecules are naturally represented as an undirected graph `G = (V, E)`,
where `V` is a set of vertices (nodes; atoms) and `E` a set of edges (bonds), GNNs (such
as MPNN) are proving to be a useful method for predicting molecular properties.

Until now, more traditional methods, such as random forests, support vector machines, etc.,
have been commonly used to predict molecular properties. In contrast to GNNs, these
traditional approaches often operate on precomputed molecular features such as
molecular weight, polarity, charge, number of carbon atoms, etc. Although these
molecular features prove to be good predictors for various molecular properties, it is
hypothesized that operating on these more "raw", "low-level", features could prove even
better.
"""

import os

# Temporary suppress tf logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import warnings
from rdkit import RDLogger

from data.bbbp import read_from
from data.smiles_datasets import MPNNDataset, dataset_triplets
from data.smiles_representations import graphs_from_smiles, molecule_from_smiles, graph_from_molecule

from solvers.mpnn import MPNNModel

# Temporary suppress warnings and RDKit logs
warnings.filterwarnings("ignore")
RDLogger.DisableLog("rdApp.*")

np.random.seed(42)
tf.random.set_seed(42)



if __name__ == '__main__':
  # --- Data preparation -------------------------------------------------
  print('### [INFO] prepare dataset')
  # NOTE(review): hard-coded absolute path — consider making this a CLI
  # argument or environment variable so the script is portable.
  f_bbbp = '/home/yangw/samples/bbbp.feather'
  df = read_from(f_bbbp)
  print(' ## [INFO] split dataset')
  # dataset_triplets returns (x, y, index) triplets for train/valid/test.
  x_train, y_train, train_index, x_valid, y_valid, valid_index, x_test, y_test, test_index = dataset_triplets(df)

  # --- Sanity checks on the conversion helpers --------------------------
  print('### [INFO] Test the functions')
  print(f"Name:\t{df.name[100]}\nSMILES:\t{df.smiles[100]}\nBBBP:\t{df.p_np[100]}")
  molecule = molecule_from_smiles(df.iloc[100].smiles)
  print(" ## [INFO] Molecule:")
  print(molecule)
  # graph_from_molecule returns (atom_features, bond_features, pair_indices).
  graph = graph_from_molecule(molecule)
  print(" ## [INFO] Graph (including self-loops):")
  print("\tatom features size\t", graph[0].shape)
  print("\tbond features size\t", graph[1].shape)
  print("\tpair indices size\t", graph[2].shape)

  # --- Model setup ------------------------------------------------------
  print('### [INFO] setup model')
  # Feature dimensions are inferred from the first sample of the training
  # split (atom features at x_train[0], bond features at x_train[1]).
  mpnn = MPNNModel(
    atom_dim=x_train[0][0][0].shape[0],
    bond_dim=x_train[1][0][0].shape[0],
  )
  mpnn.compile(
    loss=keras.losses.BinaryCrossentropy(),
    optimizer=keras.optimizers.Adam(learning_rate=5e-4),
    metrics=[keras.metrics.AUC(name="AUC")],
  )

  # --- Training ---------------------------------------------------------
  print('### [INFO] training')
  train_dataset = MPNNDataset(x_train, y_train)
  valid_dataset = MPNNDataset(x_valid, y_valid)
  history = mpnn.fit(
    train_dataset,
    validation_data=valid_dataset,
    epochs=40,
    verbose=2,
    # Re-weight classes to counter label imbalance; presumably class 0
    # (non-permeable) is the minority — TODO confirm against the dataset.
    class_weight={0: 2.0, 1: 0.5},
  )
  mpnn.save_weights('mpnn_bbbp_e40.h5')

  # --- Plot train/valid AUC curves and save to disk ---------------------
  plt.figure(figsize=(10, 6))
  plt.plot(history.history["AUC"], label="train AUC")
  plt.plot(history.history["val_AUC"], label="valid AUC")
  plt.xlabel("Epochs", fontsize=16)
  plt.ylabel("AUC", fontsize=16)
  plt.legend(fontsize=16)
  plt.savefig('training_mpnn.png')