import argparse
import warnings
import os, time
import sys
import yaml
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from numpy import nan
from scipy.stats import spearmanr
import mindspore
from mindspore import nn, Tensor, context
from mindspore import ops
from mindspore.nn import Accuracy
from mindspore.train.callback import Callback, LossMonitor
from mindspore.train.serialization import save_checkpoint, load_checkpoint, load_param_into_net
from mindspore.dataset import GeneratorDataset

from src.utils.draw_utils import plot_model
from src.utils.train_utils import lr_scheduler
from src.utils.data_utils import BatchSampler
from src.utils.utils import print_param_num
from model_ms import GNNModelMS
from dataset_ms import get_dataset

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

def epoch_runner(args, loader, model, criterion1, criterion2, optimizer=None, stage="train"):
    """Run one epoch over `loader` and return (mean_total_loss, component_sums).

    Args:
        args: parsed CLI namespace (uses lambda1/lambda2, use_* flags, clip).
        loader: MindSpore dataset; batches are dicts with "x", "y" and,
            depending on the enabled auxiliary tasks, "y1".."y9".
        model: the GNN Cell; `output[:, :20]` are residue-class logits, the
            remaining columns are the auxiliary regression heads in order
            sasa(1) / bfactor(1) / dihedral(6) / coordinate(3).
        criterion1: classification loss (cross entropy).
        criterion2: regression loss (MSE), shared by all auxiliary heads.
        optimizer: MindSpore optimizer Cell; required for stage == "train".
        stage: "train" enables gradient updates, anything else is eval-only.

    Returns:
        Tuple of (average total loss per batch, list of 6 accumulated sums:
        [total, classification, sasa, bfactor, dihedral, coordinate]).
        Component sums are *unweighted*; the caller applies lambda weights.
    """
    lambda1 = Tensor(args.lambda1, mindspore.float32)
    lambda2 = Tensor(args.lambda2, mindspore.float32)
    model.set_train(stage == "train")
    # Placeholder so disabled auxiliary tasks still contribute a tensor aux output.
    zero = Tensor(0.0, mindspore.float32)

    def forward_fn(x, batch):
        # Returns (total_loss, cla, sasa, bfactor, dihedral, coordinate).
        # Only the first output is differentiated (has_aux=True below).
        output = model(x)
        loss_cla = criterion1(output[:, :20], batch["y"])
        loss = loss_cla
        loss_sasa = loss_bf = loss_dih = loss_coord = zero
        dim = 20  # next unread output column
        if args.use_sasa:
            loss_sasa = criterion2(output[:, dim], batch["y1"])
            loss = loss + lambda1 * loss_sasa
            dim += 1
        if args.use_bfactor:
            loss_bf = criterion2(output[:, dim], batch["y2"])
            loss = loss + lambda1 * loss_bf
            dim += 1
        if args.use_dihedral:
            # Six dihedral targets y3..y8, one output column each.
            dih_losses = [criterion2(output[:, dim + i], batch[f"y{3 + i}"]) for i in range(6)]
            loss_dih = sum(dih_losses)
            loss = loss + lambda2 * loss_dih
            dim += 6
        if args.use_coordinate:
            loss_coord = criterion2(output[:, dim:dim + 3], batch["y9"])
            loss = loss + lambda1 * loss_coord
            dim += 3
        return loss, loss_cla, loss_sasa, loss_bf, loss_dih, loss_coord

    is_train = stage == "train" and optimizer is not None
    grad_fn = None
    if is_train:
        # Functional autodiff replaces the (non-existent in MindSpore)
        # loss.backward()/optimizer.step() PyTorch idiom the original used.
        # NOTE(review): assumes MindSpore >= 1.9 functional API; confirm version.
        grad_fn = mindspore.value_and_grad(
            forward_fn, None, optimizer.parameters, has_aux=True
        )

    total_loss = 0.0
    total_loss_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    epoch_bar = tqdm(loader.create_dict_iterator(), total=loader.get_dataset_size())

    for batch in epoch_bar:
        x = batch["x"]
        if is_train:
            (loss, *aux), grads = grad_fn(x, batch)
            if args.clip > 0:
                # Clip the *gradients* (the original clipped the parameters,
                # via an API that does not exist in mindspore.nn).
                grads = ops.clip_by_global_norm(grads, args.clip)
            optimizer(grads)  # MindSpore optimizers are Cells applied to grads
        else:
            loss, *aux = forward_fn(x, batch)

        loss_val = float(loss.asnumpy())  # float(): 0-d ndarray has no __round__
        total_loss += loss_val
        total_loss_list[0] += loss_val
        # aux = (cla, sasa, bfactor, dihedral, coordinate) -> slots 1..5
        for slot, part in enumerate(aux, start=1):
            total_loss_list[slot] += float(part.asnumpy())
        epoch_bar.set_postfix(loss=round(loss_val, 2))

    return total_loss / loader.get_dataset_size(), total_loss_list

def create_parser():
    """Define the command-line interface and return the parsed arguments."""
    cli = argparse.ArgumentParser()

    # --- masking / auxiliary-task switches ---
    cli.add_argument("--p", type=float, default=0.5)
    cli.add_argument("--use_sasa", action="store_true")
    cli.add_argument("--use_bfactor", action="store_true")
    cli.add_argument("--use_dihedral", action="store_true")
    cli.add_argument("--use_coordinate", action="store_true")
    cli.add_argument("--lambda1", type=float, default=0.2)
    cli.add_argument("--lambda2", type=float, default=0.5)
    cli.add_argument("--use_denoise", action="store_true")
    cli.add_argument("--noise_type", type=str, default="wild")

    # --- run identity / model family ---
    cli.add_argument("--date", type=str, default="Jun_15")
    cli.add_argument("--gnn", type=str, default="egnn")
    cli.add_argument("--problem_type", type=str, default="aa_classification")

    # --- optimization ---
    cli.add_argument("--lr", type=float, default=1e-3)
    cli.add_argument("--weight_decay", type=float, default=1e-2)
    cli.add_argument("--num_classes", type=int, default=20)
    cli.add_argument("--epochs", type=int, default=100)
    cli.add_argument("--batch_token_num", type=int, default=5120)
    cli.add_argument("--max_graph_token_num", type=int, default=2000)

    # --- architecture ---
    cli.add_argument("--node_dim", type=int, default=26)
    cli.add_argument("--edge_dim", type=int, default=93)
    cli.add_argument("--layer_num", type=int, default=6)
    cli.add_argument("--dropout", type=float, default=0)
    cli.add_argument("--JK", type=str, default="last")
    cli.add_argument("--clip", type=float, default=4.0)
    cli.add_argument("--c_alpha_max_neighbors", type=int, default=10)

    # --- data / config paths ---
    cli.add_argument("--protein_dataset", type=str, default="data/cath40_k10_dyn_imem")
    cli.add_argument("--mutant_dataset", type=str, default="data/evaluation")
    cli.add_argument("--gnn_config", type=str, default="src/Egnnconfig/egnn.yaml")

    return cli.parse_args()

if __name__ == '__main__':
    args = create_parser()

    # Load the hyper-parameter section for the chosen GNN architecture.
    # `with` guarantees the config file handle is closed (the original leaked it).
    with open(args.gnn_config) as f:
        gnn_config = yaml.load(f, Loader=yaml.FullLoader)[args.gnn]

    train_loader, val_loader, test_loader, mutant_loader = get_dataset(args, batch_size=1)

    # Run tag encoding the key hyper-parameters; used to name plot outputs.
    filename = (
        args.date
        + f'_K={args.c_alpha_max_neighbors}_p={args.p}_sasa={args.use_sasa}_'
          f'bfactor={args.use_bfactor}_'
          f'lambda1={args.lambda1}_lambda2={args.lambda2}_'
          f'noise={args.noise_type}_'
          f'gnn={args.gnn}_layer={args.layer_num}_drop={args.dropout}_lr={args.lr}'
    )
    print("Filename:", filename)

    model = GNNModelMS(gnn_config, args)
    print_param_num(model)

    # Classification loss for the 20 residue classes; MSE for auxiliary heads.
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    criterion2 = nn.MSELoss()
    optimizer = nn.Adam(model.trainable_params(), learning_rate=args.lr, weight_decay=args.weight_decay)

    train_loss_list, valid_loss_list = [], []
    # Per-epoch averaged curves: total, classification, sasa, bfactor, dihedral, coordinate.
    loss_sum, loss_cla, loss_sas, loss_bfa, loss_dih, loss_cor = [], [], [], [], [], []

    for epoch in range(1, args.epochs + 1):
        start = time.time()

        train_loss, loss_list = epoch_runner(args, train_loader, model, criterion, criterion2, optimizer, stage="train")
        train_loss_list.append(train_loss)

        n_batches = train_loader.get_dataset_size()  # hoisted invariant divisor
        loss_sum.append(loss_list[0] / n_batches)
        loss_cla.append(loss_list[1] / n_batches)
        # Auxiliary components are re-weighted so the curves show their
        # actual contribution to the total training loss.
        loss_sas.append(args.lambda1 * loss_list[2] / n_batches)
        loss_bfa.append(args.lambda1 * loss_list[3] / n_batches)
        loss_dih.append(args.lambda2 * loss_list[4] / n_batches)
        loss_cor.append(args.lambda1 * loss_list[5] / n_batches)

        # Validation pass: no optimizer, so no parameter updates.
        val_loss, _ = epoch_runner(args, val_loader, model, criterion, criterion2, optimizer=None, stage="valid")
        valid_loss_list.append(val_loss)

        print(
            f"Epoch:{epoch:03d}, Train loss:{train_loss:.4f}, Valid loss:{val_loss:.4f}, Time: {time.time()-start:.2f}s"
        )

        plot_model(
            epoch, train_loss_list, loss_cla, loss_sas, loss_bfa, loss_dih,
            loss_cor, valid_loss_list, [], filename,
        )