import os.path
import json
import torch.optim.optimizer
from transformers import get_linear_schedule_with_warmup
from model.BertClassifier import BertClassifier
import torch.nn as nn
from utils.dataset import collate_fn
from utils.classifier_data_util import make_dataset_for_sentence_cls
from torch.utils.data import DataLoader
import logging
import sys
from collections import OrderedDict
from tqdm import tqdm
from utils.pipeline_utils import make_preds_epoch
from sklearn.metrics import accuracy_score, f1_score

# Module-wide logger; INFO and above is emitted to stdout.
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Prefer the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


def train_rationale_sentence_classifier(model_pars: dict):
    """Fine-tune a BERT sentence classifier for rationale identification.

    Builds train/test datasets, trains a ``BertClassifier`` with a linear
    warmup schedule, evaluates on the test split after every epoch,
    checkpoints the best model (by test accuracy or macro-F1) together with
    per-epoch state so an interrupted run can resume, and stops early once
    ``patience`` epochs pass without improvement.

    :param model_pars: configuration dict. Expected keys include
        ``rationale_classifier`` (with ``classes``, ``lr``, ``warmup_steps``),
        ``base_dir``, ``model_dir``, ``dataset_name``, ``max_length``,
        ``output_dir``, ``epochs``, ``batch_size``, ``patience``,
        ``save_json_name``, and optionally ``max_grad_norm``.
    :return: None; the model state dict, resume checkpoint and JSON results
        are written under ``output_dir``.
    """
    labels = model_pars['rationale_classifier']['classes']
    base_dir = model_pars['base_dir']
    model_dir = model_pars['model_dir']

    rationale_classifier = BertClassifier(model_dir, num_labels=len(labels)).to(device)

    dataset_name = model_pars['dataset_name']
    logger.info(f'DATASET: {dataset_name}')
    train_dataset, test_dataset = [make_dataset_for_sentence_cls(model_dir, base_dir,
                                                                 dataset_name, split,
                                                                 model_pars['max_length'], labels)
                                   for split in ['train', 'test']]

    logger.info(f'Begin training: training size: {len(train_dataset)}, testing size: {len(test_dataset)}')
    output_dir = os.path.join(model_pars['output_dir'], dataset_name, 'cls_marker4')
    os.makedirs(output_dir, exist_ok=True)

    epochs = model_pars['epochs']
    batch_size = model_pars['batch_size']
    patience = model_pars['patience']
    max_grad_norm = model_pars.get('max_grad_norm', None)
    lr = model_pars['rationale_classifier']['lr']

    # save model state dict
    model_save_path = os.path.join(output_dir, 'rationale_classifier.pt')
    # record training epoch and best metrics in order to resume training
    epoch_save_path = os.path.join(output_dir, 'rationale_classifier_epoch_data.pt')

    optimizer = torch.optim.AdamW(rationale_classifier.parameters(), lr=lr, weight_decay=0.01)
    # Total optimizer steps across the whole run. The scheduler expects an
    # int; ceil-division counts the final partial batch of each epoch.
    training_steps = epochs * ((len(train_dataset) + batch_size - 1) // batch_size)
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=model_pars['rationale_classifier']['warmup_steps'],
                                                num_training_steps=training_steps)
    # Default mean reduction; the same criterion is reused for evaluation
    # inside make_preds_epoch, so the reduction must not change.
    criterion = nn.CrossEntropyLoss()

    cl_results = {
        'train_losses': [],
        'val_losses': [],
        'val_acc': [],
        'val_f1': [],
        'test_losses': [],
        'test_acc': [],
        'test_f1': [],
    }

    start_epoch = 0
    best_epoch = -1
    best_model_state_dict = None
    best_val_loss = float('inf')
    best_val_f1 = 0
    best_test_loss = float('inf')
    best_test_f1 = 0
    best_test_acc = 0
    # min_delta = 0.0001  # minimum improvement to count as progress
    epoch_data = {
        'epoch': None,
        'results': None,
        'best_test_loss': None,
        'best_test_f1': None,
        'best_test_acc': None,
        'done': None,
    }

    if os.path.exists(epoch_save_path):
        # Resume: reload weights and the bookkeeping saved by a previous run.
        rationale_classifier.load_state_dict(torch.load(model_save_path))
        epoch_data = torch.load(epoch_save_path)
        start_epoch = epoch_data['epoch'] + 1
        # handle finishing because patience was exceeded or we didn't get the best final epoch
        if bool(epoch_data.get('done', 0)):
            start_epoch = epochs
        best_epoch = start_epoch
        # Restore the per-epoch history so resuming does not overwrite it
        # with empty lists when the checkpoint is saved again.
        if epoch_data.get('results'):
            cl_results = epoch_data['results']
        # Older checkpoints stored None here; keep +inf in that case.
        if epoch_data.get('best_test_loss') is not None:
            best_test_loss = epoch_data['best_test_loss']
        best_test_f1 = epoch_data['best_test_f1']
        best_test_acc = epoch_data['best_test_acc']

        # state_dict is saved at GPU, here it is transferred to the cpu
        best_model_state_dict = OrderedDict({k: v.cpu() for k, v in rationale_classifier.state_dict().items()})
        logger.info(f'MODEL ALREADY EXITS, LOAD THE RESULT \n'
                    f'test loss: {best_test_loss}\n '
                    f'test acc: {best_test_acc}\n '
                    f'test f1: {best_test_f1}')
    logger.info(f'Training rationale identifier from epoch {start_epoch} until epoch {epochs}')

    # Built once; re-iterating a shuffling DataLoader reshuffles every epoch.
    dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    for epoch in range(start_epoch, epochs):
        epoch_train_loss = 0
        rationale_classifier.train()
        logger.info(f'EPOCH {epoch} START TRAINING')

        for batch_inputs in tqdm(dataloader):
            batch_inputs = tuple(t.to(device) for t in batch_inputs)
            inputs = (batch_inputs[0], batch_inputs[1])
            # renamed from `labels` to avoid shadowing the class-label list
            batch_labels = batch_inputs[2]
            logits = rationale_classifier(inputs)

            optimizer.zero_grad()
            # criterion already averages over the batch: back-prop it directly
            # (the previous sum-then-divide double-scaled the gradients).
            loss = criterion(logits, batch_labels)
            # accumulate the summed loss so the epoch average is per example
            epoch_train_loss += loss.item() * batch_labels.size(0)
            loss.backward()
            if max_grad_norm:
                nn.utils.clip_grad_norm_(rationale_classifier.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
        epoch_train_loss /= len(train_dataset)

        cl_results['train_losses'].append(epoch_train_loss)
        logger.info(f'Training LOSS {epoch_train_loss}')

        with torch.no_grad():
            logger.info(
                f'EPOCH: {epoch} \n'
                f'test_dataset: {len(test_dataset)}')

            test_loss, _, test_hard_pred, test_truth = \
                make_preds_epoch(rationale_classifier, test_dataset, batch_size, device, criterion)

            test_acc = accuracy_score(test_truth, test_hard_pred)
            test_f1 = f1_score(test_truth, test_hard_pred, average='macro')

            cl_results['test_losses'].append(test_loss)
            cl_results['test_acc'].append(test_acc)
            cl_results['test_f1'].append(test_f1)

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss}, \n'
                f'acc: {cl_results["test_acc"][-1]},\n'
                f'f1: {cl_results["test_f1"][-1]}')

            # A new best on either metric triggers a checkpoint.
            best_flag = False
            if test_f1 > best_test_f1:
                best_test_f1 = test_f1
                best_flag = True
            if test_acc > best_test_acc:
                best_test_acc = test_acc
                best_flag = True
            if test_loss < best_test_loss:
                best_test_loss = test_loss
            if best_flag:
                logger.info(f'EPOCH {epoch} NEW BEST MODEL')
                best_epoch = epoch
                # keep a CPU copy so the best weights can be restored at the
                # end even on a fresh (non-resumed) run
                best_model_state_dict = OrderedDict({k: v.cpu() for k, v in rationale_classifier.state_dict().items()})
                torch.save(rationale_classifier.state_dict(), model_save_path)

            epoch_data['epoch'] = epoch
            epoch_data['results'] = cl_results
            epoch_data['best_test_loss'] = best_test_loss
            epoch_data['best_test_f1'] = best_test_f1
            epoch_data['best_test_acc'] = best_test_acc
            epoch_data['done'] = 0

            torch.save(epoch_data, epoch_save_path)
            res = json.dumps(epoch_data, indent=2)
            epoch_data_path = os.path.join(output_dir, model_pars['save_json_name'])
            with open(epoch_data_path, 'w') as f:
                f.write(res)

        # Early stopping: no improvement for more than `patience` epochs.
        if epoch - best_epoch > patience:
            epoch_data['done'] = 1
            torch.save(epoch_data, epoch_save_path)
            break

    epoch_data['done'] = 1
    epoch_data['results'] = cl_results
    torch.save(epoch_data, epoch_save_path)
    # Restore the best weights; may be None if no epoch ever improved
    # (e.g. resuming an already-finished run before any checkpoint existed).
    if best_model_state_dict is not None:
        rationale_classifier.load_state_dict(best_model_state_dict)
    rationale_classifier.eval()

    res = json.dumps(epoch_data, indent=2)
    epoch_data_path = os.path.join(output_dir, model_pars['save_json_name'])
    with open(epoch_data_path, 'w') as f:
        f.write(res)

    return None


if __name__ == '__main__':
    # Fix the RNG seed on CPU and all GPUs for reproducibility.
    seed = 106
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    path = '/params/multirc_bert.json'
    logger.info(f'Loading model parameters from {path}')
    with open(path, 'r') as fp:
        model_params = json.load(fp)
    # indent=2 pretty-prints the params (line breaks + two-space indent)
    logger.info(f'Params: {json.dumps(model_params, indent=2, sort_keys=True)}')
    train_rationale_sentence_classifier(model_params)
