import json
import os

import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from torch_geometric.loader import DataLoader
from tqdm import tqdm
from sklearn.model_selection import train_test_split

import const as cn
from dataloader import get_load_dataset
from extract_data import extract_and_save_graph_data, extract_vocabulary
from model import PredictModel
from utils import load_pickle, eval_batch

# Shared binary-classification loss; expects raw logits (sigmoid applied internally).
bcls_criterion = torch.nn.BCEWithLogitsLoss()


def load_epoch(model_path, epoch):
    """Load the checkpoint saved for *epoch* under *model_path*.

    Returns whatever tuple ``torch.save`` stored for that epoch
    (model state dict, optimizer state dict), mapped onto the CPU.
    """
    checkpoint_name = 'epoch.%04d.pth' % epoch
    print('loading from ' + checkpoint_name)
    checkpoint_path = os.path.join(model_path, checkpoint_name)
    return torch.load(checkpoint_path, map_location='cpu')


def exec_eval(model, device, loader):
    """Evaluate *model* on every batch of *loader* and return the F0.5 score.

    ``eval_batch`` appends one list of 0/1 predictions per batch to
    ``y_pred``; each entry is compared element-wise against ``batch.y`` to
    accumulate a confusion matrix.  Accuracy, recall and precision are
    printed, and the F-beta score with beta = 0.5 (precision weighted more
    heavily than recall) is printed and returned.

    All metrics fall back to 0.0 instead of raising ``ZeroDivisionError``
    when a denominator is zero (empty loader, no positive labels, no
    positive predictions, or precision == recall == 0).
    """
    model.eval()
    y_pred = []
    TP, FN, FP, TN = 0, 0, 0, 0
    for batch in tqdm(loader, desc="Iteration"):
        eval_batch(batch, device, model, y_pred)
        # y_pred[-1] holds this batch's predictions, aligned index-for-index
        # with batch.y (assumed — eval_batch appends per-batch; TODO confirm).
        for i in range(len(y_pred[-1])):
            if batch.y[i] == 1 and y_pred[-1][i] == 1:
                TP += 1
            elif batch.y[i] == 1 and y_pred[-1][i] == 0:
                FN += 1
            elif batch.y[i] == 0 and y_pred[-1][i] == 1:
                FP += 1
            elif batch.y[i] == 0 and y_pred[-1][i] == 0:
                TN += 1
    total = TP + TN + FN + FP
    acc = (TP + TN) / total if total else 0.0
    recall = TP / (TP + FN) if TP + FN else 0.0
    precision = TP / (TP + FP) if TP + FP else 0.0
    beta_sq = 0.5 * 0.5  # beta = 0.5 -> F0.5
    denom = beta_sq * precision + recall
    score = (1 + beta_sq) * (precision * recall) / denom if denom else 0.0
    print("acc:", acc, "recall:", recall, "precision:", precision)
    print("score:", score)
    return score


def run(device, net_parameters, _type, x_train, x_val):
    """Train (and optionally evaluate with early stopping) one PredictModel.

    Params:
        device: torch device to place the model and batches on.
        net_parameters: config dict (batch_size, learning_rate, epochs,
            extract/eval/load/save_every_epoch flags, continues_fials);
            mutated in place with ``type_nums`` / ``value_nums``.
        _type: source-type tag (e.g. 'jsp') selecting vocab and model files.
        x_train / x_val: DataFrames listing train / validation samples.
    """
    if net_parameters["extract"]:
        # First run: build vocabularies from the train split, then cache the
        # graph datasets for both splits using those vocabularies.
        print("Start building vocabulary on train dataset...")
        extract_vocabulary(_type, x_train, cn.DIR_TRAIN_AST, 10000, 2, cn.DIR_TMP_VOCAB)
        type_word2id = load_pickle(cn.FILE_WORD_TYPES.format(type=_type))
        value_word2id = load_pickle(cn.FILE_WORD_VALUES.format(type=_type))

        extract_and_save_graph_data(_type, x_train, cn.DIR_TRAIN_AST, cn.DIR_TMP_TRAIN_DATASET, type_word2id, value_word2id)
        extract_and_save_graph_data(_type, x_val, cn.DIR_TRAIN_AST, cn.DIR_TMP_VALID_DATASET, type_word2id, value_word2id)
    else:
        # Reuse the vocabularies produced by a previous extraction run.
        type_word2id = load_pickle(cn.FILE_WORD_TYPES.format(type=_type))
        value_word2id = load_pickle(cn.FILE_WORD_VALUES.format(type=_type))

    # Embedding table sizes depend on the vocabularies just loaded.
    net_parameters["type_nums"] = len(type_word2id)
    net_parameters["value_nums"] = len(value_word2id)
    load_train_dataset = get_load_dataset(cn.DIR_TMP_TRAIN_DATASET, "train", x_train)
    load_val_dataset = get_load_dataset(cn.DIR_TMP_VALID_DATASET, "val", x_val)
    train_loader = DataLoader(load_train_dataset(), batch_size=net_parameters["batch_size"], shuffle=True)
    val_loader = DataLoader(load_val_dataset(), batch_size=net_parameters["batch_size"], shuffle=True)

    model = PredictModel(net_parameters).to(device)

    # Optionally resume from the checkpoint of epoch net_parameters["load"].
    optimizer_state_dict = None
    if net_parameters["load"] > 0:
        model_state_dict, optimizer_state_dict = load_epoch(cn.DIR_MODEL, net_parameters["load"])
        model.load_state_dict(model_state_dict)

    optimizer = optim.Adam(
        model.parameters(), lr=net_parameters["learning_rate"]
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    loss_list = []
    best_valid_acc = 0
    # Early-stopping budget: number of consecutive non-improving epochs
    # tolerated ('continues_fials' is the config key's spelling).
    patience = net_parameters['continues_fials']
    for epoch in range(net_parameters["load"] + 1, net_parameters["epochs"] + 1):
        print("=====Epoch {} ====".format(epoch))
        print("Training...")
        losses = []
        for _, batch in enumerate(tqdm(train_loader)):
            batch = batch.to(device)
            pred = model(batch.node_type, batch.node_value, batch.edge_index, batch.edge_attr, batch.batch)
            optimizer.zero_grad()
            # NaN labels compare unequal to themselves -> mask them out.
            is_labeled = batch.y == batch.y
            if len(batch.y.shape) == 1:
                batch.y = torch.unsqueeze(batch.y, 1)
            loss = bcls_criterion(
                pred.to(torch.float32)[is_labeled],
                batch.y.to(torch.float32)[is_labeled],
            )
            losses.append(loss.item())
            loss.backward()
            optimizer.step()
        if net_parameters["save_every_epoch"] and epoch % net_parameters["save_every_epoch"] == 0:
            tqdm.write("saving to epoch.%04d.pth" % epoch)
            torch.save(
                (model.state_dict(), optimizer.state_dict()),
                os.path.join(cn.DIR_MODEL, "epoch.%04d.pth" % epoch),
            )
        # Mean training loss for this epoch (kept separate from the per-batch
        # `loss` tensor above to avoid shadowing).
        epoch_loss = np.mean(losses)
        loss_list.append(epoch_loss)
        print(loss_list)

        if net_parameters["eval"]:
            # Fixed log message: this evaluates the *validation* loader.
            print("Evaluating the validation dataset...")
            cur_acc = exec_eval(model, device, val_loader)
            if cur_acc > best_valid_acc:
                print(f"best acc update, best acc={cur_acc}")
                best_valid_acc = cur_acc
                torch.save(
                    (model.state_dict(), optimizer.state_dict()),
                    cn.FILE_BEST_MODEL_MODEL.format(type=_type),
                )
                # Reset the early-stopping budget on improvement.
                patience = net_parameters['continues_fials']
            else:
                patience -= 1
                if patience == 0:
                    print(f"The performance of the model has not been improved by consecutive {net_parameters['continues_fials']} epoch, early stop")
                    break


def main():
    """Entry point: load config, split the data, and train per source type.

    Reads the JSON training config, picks GPU 0 when available, then
    performs a stratified 80/20 train/validation split and trains one
    model per source type (currently only 'jsp').
    """
    with open(cn.FILE_CONFIG_JSON) as f:
        net_parameters = json.load(f)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    total = pd.read_csv(cn.FILE_TRAIN_CSV_DATA)
    # Combined "<type>_<label>" key so every (type, label) combination is
    # represented proportionally in both splits.
    total['flag'] = ['{}_{}'.format(_type, _label) for _type, _label in zip(total['type'], total['label'])]

    # Bug fix: 'flag' was built for stratification but never passed as
    # stratify= (its split outputs were discarded), so it had no effect.
    x_train, x_val, _, _ = train_test_split(
        total, total['flag'], test_size=.2, random_state=0, stratify=total['flag']
    )
    for _type in ('jsp', ):
        x_t_train = x_train[x_train['type'] == _type]
        x_t_val = x_val[x_val['type'] == _type]
        run(device, net_parameters, _type, x_t_train, x_t_val)


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
