import os.path
import json
import torch.optim.optimizer
from transformers import get_linear_schedule_with_warmup
from model.BertClassifier import BertClassifier
import torch.nn as nn
from utils.dataset import collate_fn
from utils.identifier_data_util import make_dataset_for_sentence_ide
from torch.utils.data import DataLoader
import logging
import sys
from collections import OrderedDict
from tqdm import tqdm
from utils.pipeline_utils import make_preds_epoch
from sklearn.metrics import precision_recall_fscore_support

# Module-level logger; INFO and above go to stdout (switch to DEBUG for
# verbose runs).  The previous commented-out file/console handler setup was
# dead code and has been removed.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def train_rationale_sentence_identifier(model_pars: dict,
                                        ):
    """Train the sentence-level rationale identifier.

    Builds the train/val/test datasets, fine-tunes a BertClassifier with a
    linear warmup schedule, evaluates on the val and test splits after every
    epoch, checkpoints the best model so far, and supports resuming an
    interrupted run from the saved epoch data.  Early stopping triggers once
    no improvement has been seen for `patience` epochs.

    NOTE(review): the best model is selected on TEST metrics, not validation
    metrics — this leaks the test set into model selection.  Confirm whether
    that is intentional.

    :param model_pars: configuration dict; must provide 'model_dir',
        'base_dir', 'dataset_name', 'max_length', 'output_dir' and a
        'rationale_identifier' sub-dict with 'epochs', 'batch_size',
        'patience', 'lr', 'warmup_steps', 'save_json_name' and an optional
        'max_grad_norm'.
    :return: tuple (model in eval mode with the best weights loaded,
        dict of per-epoch train/val/test metrics)
    """
    rationale_identifier = BertClassifier(model_dir=model_pars['model_dir'], num_labels=2).to(device)

    base_dir = model_pars['base_dir']

    # FIXME: every split loads 'train1', so val_dataset and test_dataset are
    # copies of the training data and the reported val/test metrics are in
    # fact training metrics.  Presumably this should be something like
    # ['train', 'val', 'test'] — confirm the real split names before fixing.
    train_dataset, val_dataset, test_dataset = [make_dataset_for_sentence_ide(model_pars['model_dir'], base_dir,
                                                                              model_pars['dataset_name'], split,
                                                                              model_pars['max_length'], False)
                                                for split in ['train1', 'train1', 'train1']]

    logger.info(f'Begin training: training size: {len(train_dataset)}, validating size: {len(val_dataset)}, '
                f'testing size: {len(test_dataset)}')

    output_dir = os.path.join(model_pars['output_dir'], model_pars['dataset_name'], 'rationale_sen_idf')
    os.makedirs(output_dir, exist_ok=True)

    epochs = model_pars['rationale_identifier']['epochs']
    batch_size = model_pars['rationale_identifier']['batch_size']
    patience = model_pars['rationale_identifier']['patience']
    max_grad_norm = model_pars['rationale_identifier'].get('max_grad_norm', None)

    # Best model weights go here ...
    model_save_path = os.path.join(output_dir, 'rationale_identifier.pt')
    # ... and the epoch counter / best metrics go here so training can resume.
    epoch_save_path = os.path.join(output_dir, 'rationale_identifier_epoch_data.pt')

    optimizer = torch.optim.AdamW(rationale_identifier.parameters(), lr=model_pars['rationale_identifier']['lr'],
                                  weight_decay=0.01)
    # The scheduler steps once per batch, so the total step count is
    # epochs * ceil(dataset_size / batch_size).  (The previous float
    # division produced a non-integer and under-counted the final partial
    # batch of each epoch.)
    steps_per_epoch = (len(train_dataset) + batch_size - 1) // batch_size
    training_steps = epochs * steps_per_epoch
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=model_pars['rationale_identifier']['warmup_steps'],
                                                num_training_steps=training_steps)
    criterion = nn.CrossEntropyLoss()

    # FIXME(review): metrics below use average='macro'; the original
    # "fixme micro" note suggests micro-averaging may have been intended.
    ra_results = {
        'train_losses': [],
        'val_losses': [],
        'val_pre': [],
        'val_recall': [],
        'val_f1': [],
        'test_losses': [],
        'test_pre': [],
        'test_recall': [],
        'test_f1': [],
    }

    start_epoch = 0
    best_epoch = -1
    best_model_state_dict = None
    best_test_loss = float('inf')
    best_test_f1 = 0
    best_test_recall = 0
    best_test_pre = 0
    best_val_loss = float('inf')
    best_val_f1 = 0
    epoch_data = {
        'epoch': None,
        'results': None,
        'best_val_loss': None,
        'best_val_f1': None,
        'best_test_loss': None,
        'best_test_f1': None,
        'best_test_recall': None,
        'best_test_pre': None,
        'done': None,
    }

    if os.path.exists(epoch_save_path):
        # Resume: restore the checkpointed weights and bookkeeping.
        # map_location keeps this working when the checkpoint was written on
        # a different device (e.g. saved on GPU, resumed on CPU).
        rationale_identifier.load_state_dict(torch.load(model_save_path, map_location=device))
        epoch_data = torch.load(epoch_save_path, map_location=device)
        start_epoch = epoch_data['epoch'] + 1
        # 'done' means the previous run already finished (patience exceeded
        # or all epochs ran), so skip the training loop entirely.
        if bool(epoch_data.get('done', 0)):
            start_epoch = epochs
        best_epoch = start_epoch
        # Older checkpoints may not have recorded the val bests (they were
        # left as None); fall back to the neutral initial values.
        if epoch_data['best_val_loss'] is not None:
            best_val_loss = epoch_data['best_val_loss']
        if epoch_data['best_val_f1'] is not None:
            best_val_f1 = epoch_data['best_val_f1']
        best_test_loss = epoch_data['best_test_loss']
        best_test_f1 = epoch_data['best_test_f1']
        best_test_recall = epoch_data['best_test_recall']
        best_test_pre = epoch_data['best_test_pre']

        # state_dict lives on the training device; keep the best snapshot on
        # the CPU so it survives independently of GPU memory.
        best_model_state_dict = OrderedDict({k: v.cpu() for k, v in rationale_identifier.state_dict().items()})
        logger.info(f'MODEL ALREADY EXITS, LOAD THE RESULT \n'
                    f'test loss: {best_test_loss}\n'
                    f'test pre: {best_test_pre}\n'
                    f'test recall: {best_test_recall}\n'
                    f'test f1: {best_test_f1}')
    logger.info(f'Training rationale identifier from epoch {start_epoch} until epoch {epochs}')

    for epoch in range(start_epoch, epochs):
        rationale_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                          collate_fn=collate_fn)

        epoch_train_loss = 0
        rationale_identifier.train()
        logger.info(f'EPOCH {epoch} START TRAINING')

        for batch_inputs in tqdm(rationale_dataloader):
            batch_inputs = tuple(t.to(device) for t in batch_inputs)
            inputs = (batch_inputs[0], batch_inputs[1])
            labels = batch_inputs[2]
            logits = rationale_identifier(inputs)

            # Binary classification: CrossEntropyLoss takes class indices
            # directly, no one-hot needed.
            optimizer.zero_grad()
            # CrossEntropyLoss already averages over the batch
            # (reduction='mean'), so backpropagate the mean as-is.  The
            # previous code divided the mean by the batch size a second
            # time, shrinking every gradient by a factor of batch_size.
            loss = criterion(logits, labels)
            # Accumulate the *summed* loss so the epoch average below is a
            # true per-example mean over the whole dataset.
            epoch_train_loss += loss.item() * labels.size(0)
            loss.backward()
            if max_grad_norm:
                nn.utils.clip_grad_norm_(rationale_identifier.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
        epoch_train_loss /= len(train_dataset)

        ra_results['train_losses'].append(epoch_train_loss)
        logger.info(f'EPOCH {epoch} training loss {epoch_train_loss}')

        # Evaluate on val and test after every epoch.
        with torch.no_grad():
            logger.info(
                f'EPOCH: {epoch} \n'
                f'Start evaluating val_dataset: {len(val_dataset)}\n'
                f'test_dataset: {len(test_dataset)}')
            val_loss, _, val_hard_pred, val_truth = \
                make_preds_epoch(rationale_identifier, val_dataset, batch_size, device, criterion)

            test_loss, _, test_hard_pred, test_truth = \
                make_preds_epoch(rationale_identifier, test_dataset, batch_size, device, criterion)

            val_pre, val_recall, val_f1, _ = precision_recall_fscore_support(val_truth, val_hard_pred, average='macro')
            test_pre, test_recall, test_f1, _ = precision_recall_fscore_support(test_truth, test_hard_pred, average='macro')

            ra_results['val_losses'].append(val_loss)
            ra_results['val_pre'].append(val_pre)
            ra_results['val_recall'].append(val_recall)
            ra_results['val_f1'].append(val_f1)

            ra_results['test_losses'].append(test_loss)
            ra_results['test_pre'].append(test_pre)
            ra_results['test_recall'].append(test_recall)
            ra_results['test_f1'].append(test_f1)

            # Track the val bests too, so the checkpoint's best_val_* fields
            # are actually populated (they were previously saved as None).
            if val_loss < best_val_loss:
                best_val_loss = val_loss
            if val_f1 > best_val_f1:
                best_val_f1 = val_f1

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------VAL----------------\n'
                f'loss {val_loss} \n'
                f'precision: {ra_results["val_pre"][-1]}\n'
                f'recall: {ra_results["val_recall"][-1]} \n'
                f'f1: {ra_results["val_f1"][-1]}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss} \n'
                f'precision: {ra_results["test_pre"][-1]}\n'
                f'recall: {ra_results["test_recall"][-1]} \n'
                f'f1: {ra_results["test_f1"][-1]}')

            # "Improved" means ANY of the four test metrics got better.
            if (test_loss < best_test_loss or
                    ra_results["test_f1"][-1] > best_test_f1 or
                    ra_results["test_recall"][-1] > best_test_recall or
                    ra_results["test_pre"][-1] > best_test_pre):
                logger.info(
                    f'EPOCH {epoch} NEW BEST MODEL\n'
                    f'precision: {ra_results["test_pre"][-1]},\n'
                    f'recall: {ra_results["test_recall"][-1]}, \n'
                    f'f1: {ra_results["test_f1"][-1]}')
                # Snapshot the weights on CPU so the best model survives
                # independently of the training device.
                best_model_state_dict = OrderedDict({k: v.cpu() for k, v in rationale_identifier.state_dict().items()})
                best_epoch = epoch
                if test_loss < best_test_loss:
                    best_test_loss = test_loss
                if ra_results["test_f1"][-1] > best_test_f1:
                    best_test_f1 = ra_results["test_f1"][-1]
                if ra_results["test_recall"][-1] > best_test_recall:
                    best_test_recall = ra_results["test_recall"][-1]
                if ra_results["test_pre"][-1] > best_test_pre:
                    best_test_pre = ra_results["test_pre"][-1]
                torch.save(rationale_identifier.state_dict(), model_save_path)

                epoch_data['epoch'] = epoch
                epoch_data['results'] = ra_results
                epoch_data['best_val_loss'] = best_val_loss
                epoch_data['best_val_f1'] = best_val_f1
                epoch_data['best_test_loss'] = best_test_loss
                epoch_data['best_test_f1'] = best_test_f1
                epoch_data['best_test_recall'] = best_test_recall
                epoch_data['best_test_pre'] = best_test_pre
                epoch_data['done'] = 0

                torch.save(epoch_data, epoch_save_path)
                res = json.dumps(epoch_data, indent=2)
                # Was model_pars['rationale_classifier'] — every other access
                # in this function uses 'rationale_identifier' (see the final
                # JSON dump below), so that key was a typo / KeyError.
                epoch_data_path = os.path.join(output_dir, model_pars['rationale_identifier']['save_json_name'])
                with open(epoch_data_path, 'w') as f:
                    f.write(res)

        # Early stopping: no improvement for `patience` epochs.
        if epoch - best_epoch > patience:
            epoch_data['done'] = 1
            torch.save(epoch_data, epoch_save_path)
            break
    epoch_data['done'] = 1
    epoch_data['results'] = ra_results
    torch.save(epoch_data, epoch_save_path)
    # Reload the best snapshot; guard against a zero-epoch run (epochs == 0
    # with no prior checkpoint) where no snapshot was ever taken.
    if best_model_state_dict is not None:
        rationale_identifier.load_state_dict(best_model_state_dict)
    rationale_identifier.eval()

    res = json.dumps(epoch_data, indent=2)
    epoch_data_path = os.path.join(output_dir, model_pars['rationale_identifier']['save_json_name'])
    with open(epoch_data_path, 'w') as f:
        f.write(res)

    return rationale_identifier, ra_results


if __name__ == '__main__':
    # Seed both the CPU and all CUDA RNGs for reproducible runs.
    torch.manual_seed(621)
    torch.cuda.manual_seed_all(621)

    # todo The following code has not been tested
    # NOTE(review): this is an absolute path at the filesystem root —
    # presumably it should be relative to the project; verify before running.
    path = '/params/fever_bert.json'
    logger.info(f'Loading model parameters from {path}')
    with open(path, 'r') as fp:
        model_params = json.load(fp)
    # indent=2 pretty-prints the parameter dump with two-space indentation.
    logger.info(f'Params: {json.dumps(model_params, indent=2, sort_keys=True)}')
    rationale_identifier, ra_results = train_rationale_sentence_identifier(model_params)