# -*- coding: utf-8 -*-
# Codes from Baidu AI, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# Finetuning of a compound property prediction model
# Author: MiqroEra Shibo

import numpy as np
import paddle
import paddle.nn as nn

from helixwrapper.utils.splitters import RandomSplitter, IndexSplitter, ScaffoldSplitter
from helixwrapper.datasets import *
from helixwrapper.utils import load_json_config
from helixwrapper.models import PretrainGNNModel

from downstream.model import DownstreamModel
from downstream.utils import calc_rocauc_score, exempt_parameters
from downstream.featurizer import DownstreamCollateFn, DownstreamTransformFn

# Tox21 is a multi-task classification benchmark; one output head per task.
task_names = get_default_tox21_task_names()
print(task_names)

# Load encoder/head configs from JSON; the head's output width must match
# the number of Tox21 tasks.
# NOTE(review): absolute paths are machine-specific — consider CLI args.
compound_encoder_config = load_json_config("/home/shibo/helix/helixwrapper/compound_property/model_configs/pregnn_paper.json")
model_config = load_json_config("/home/shibo/helix/helixwrapper/compound_property/model_configs/down_linear.json")
model_config['num_tasks'] = len(task_names)

# Build the GNN encoder plus downstream prediction head.
compound_encoder = PretrainGNNModel(compound_encoder_config)
model = DownstreamModel(model_config, compound_encoder)
# reduction='none' keeps per-element losses so invalid labels can be masked
# out later (see train()).
criterion = nn.BCELoss(reduction='none')
opt = paddle.optimizer.Adam(0.001, parameters=model.parameters())

# Warm-start the encoder from attribute-masking pretraining weights.
compound_encoder.set_state_dict(paddle.load('./model/pretrain_attrmask/compound_encoder.pdparams'))

# dataset loading and feature extraction
### Load the toy dataset:
dataset = load_tox21_dataset("/home/shibo/helix/helixwrapper/compound_property/dataset/chem_dataset_small/tox21", task_names)
### Load the full dataset:
# dataset = load_tox21_dataset("./chem_dataset/tox21", task_names)
dataset.transform(DownstreamTransformFn(), num_workers=4)

# Scaffold split groups molecules by Bemis-Murcko scaffold for a harder,
# more realistic generalization test than a random split.
# splitter = RandomSplitter()
splitter = ScaffoldSplitter()
train_dataset, valid_dataset, test_dataset = splitter.split(
        dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)
print("Train/Valid/Test num: %s/%s/%s" % (
        len(train_dataset), len(valid_dataset), len(test_dataset)))


def train(model, train_dataset, collate_fn, criterion, opt):
    """Run one training epoch and return the mean per-batch masked loss.

    Args:
        model: DownstreamModel mapping a batch of graphs to per-task scores.
        train_dataset: dataset exposing ``get_data_loader`` that yields
            ``(graphs, valids, labels)`` batches.
        collate_fn: callable that batches raw items for the loader.
        criterion: element-wise loss (``nn.BCELoss(reduction='none')`` at the
            call site) so invalid labels can be masked before reduction.
        opt: paddle optimizer bound to ``model.parameters()``.

    Returns:
        Mean of the per-batch masked losses over the epoch (numpy float).
    """
    data_gen = train_dataset.get_data_loader(
            batch_size=128,
            num_workers=4,
            shuffle=True,
            collate_fn=collate_fn)
    list_loss = []
    model.train()
    for graphs, valids, labels in data_gen:
        graphs = graphs.tensor()
        labels = paddle.to_tensor(labels, 'float32')
        # `valids` masks missing labels (Tox21 is multi-task with gaps).
        # NOTE(review): assumes entries are 0/1 — confirm in the featurizer.
        valids = paddle.to_tensor(valids, 'float32')
        preds = model(graphs)
        loss = criterion(preds, labels)
        # Masked mean: only valid (sample, task) entries contribute.
        loss = paddle.sum(loss * valids) / paddle.sum(valids)
        loss.backward()
        opt.step()
        opt.clear_grad()
        # Store plain floats instead of 0-d arrays; np.mean is unchanged.
        list_loss.append(float(loss))
    return np.mean(list_loss)

def evaluate(model, test_dataset, collate_fn):
    """Score `model` on `test_dataset` with calc_rocauc_score.

    Predictions, labels and validity masks are accumulated across batches and
    concatenated once, so the metric is computed over the whole split rather
    than averaged per batch.
    """
    loader = test_dataset.get_data_loader(
            batch_size=128,
            num_workers=4,
            shuffle=False,
            collate_fn=collate_fn)
    model.eval()
    preds_all, labels_all, valids_all = [], [], []
    for graphs, valids, labels in loader:
        graphs = graphs.tensor()
        labels = paddle.to_tensor(labels, 'float32')
        valids = paddle.to_tensor(valids, 'float32')
        batch_preds = model(graphs)
        preds_all.append(batch_preds.numpy())
        labels_all.append(labels.numpy())
        valids_all.append(valids.numpy())
    return calc_rocauc_score(
            np.concatenate(labels_all, 0),
            np.concatenate(preds_all, 0),
            np.concatenate(valids_all, 0))

# Batch collation must use the same atom/bond feature names the encoder was
# configured with.
collate_fn = DownstreamCollateFn(
        atom_names=compound_encoder_config['atom_names'],
        bond_names=compound_encoder_config['bond_names'])
# Short demo run (4 epochs); evaluates on both valid and test every epoch.
# NOTE(review): no best-on-valid checkpointing — the final save below keeps
# only the last epoch's weights.
for epoch_id in range(4):
    train_loss = train(model, train_dataset, collate_fn, criterion, opt)
    val_auc = evaluate(model, valid_dataset, collate_fn)
    test_auc = evaluate(model, test_dataset, collate_fn)
    print("epoch:%s train/loss:%s" % (epoch_id, train_loss))
    print("epoch:%s val/auc:%s" % (epoch_id, val_auc))
    print("epoch:%s test/auc:%s" % (epoch_id, test_auc))
# Persist the finetuned head + encoder weights.
paddle.save(model.state_dict(), './model/tox21/model.pdparams')