import os.path
import json
import torch.optim.optimizer
from transformers import BertTokenizer
from transformers import get_linear_schedule_with_warmup
from model.BertClassifier import BertClassifier
import torch.nn as nn
from utils.dataset import collate_fn_ide2cls, ClassificationDataset, collate_fn
from utils.joint_data_util import make_dataset_for_ide2cls, doc_id_2_doc
from utils.classifier_data_util import make_single_instance_for_classifier, make_dataset_for_sentence_cls
from torch.utils.data import DataLoader
import logging
import sys
from collections import OrderedDict
from tqdm import tqdm
from utils.pipeline_utils import make_preds_epoch_joint_dataset, make_preds_epoch
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, f1_score

# Module-level logger; INFO output is routed to stdout so training progress
# shows up in console/job logs.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def train_ide_2_cls(model_pars: dict):
    """Jointly train a rationale identifier and a sentence classifier.

    Pipeline:
        Doc -> identifier -> (query, rationales which substitute context)
            -> classifier -> cls_label

    Per epoch:
      1. Train the identifier on (query, sentence) pairs; sentences it
         predicts as rationales are harvested as classifier training
         instances.
      2. Evaluate the identifier on the test split; its predicted rationales
         form the "joint" evaluation set for the classifier.
      3. Train the classifier on the harvested instances, then evaluate it on
         the gold test set and on the joint set.

    Checkpoints (both model state dicts plus bookkeeping) are written under
    ``<output_dir>/<dataset_name>/ide_2_cls`` so that an interrupted run can
    resume; early stopping triggers after ``patience`` epochs without
    improvement on any tracked metric.

    :param model_pars: configuration dict loaded from the params JSON
        (model paths, lr, epochs, batch_size, class labels, etc.).
    :return: None
    """

    labels = model_pars['rationale_classifier']['classes']
    base_dir = model_pars['base_dir']
    model_dir = model_pars['model_dir']

    # Identifier is a binary classifier (rationale vs. not); the classifier
    # predicts one of the task labels.
    identifier = BertClassifier(model_dir, num_labels=2, dropout_prob=model_pars['dropout_prob']).to(device)
    classifier = BertClassifier(model_dir=model_dir, num_labels=len(labels)).to(device)
    tokenizer = BertTokenizer.from_pretrained(model_dir)

    ide_train_dataset, ide_test_dataset = [
        make_dataset_for_ide2cls(model_dir, base_dir,
                                 model_pars['dataset_name'], split,
                                 model_pars['max_length'])
        for split in ['train', 'test']]

    cls_test_dataset = make_dataset_for_sentence_cls(model_dir, base_dir,
                                                     model_pars['dataset_name'], 'test',
                                                     model_pars['max_length'], labels)

    logger.info(f'Identifier Datasize:\n'
                f'Train: {len(ide_train_dataset)}\n'
                f'Test: {len(ide_test_dataset)}')

    output_dir = os.path.join(model_pars['output_dir'], model_pars['dataset_name'], 'ide_2_cls')
    os.makedirs(output_dir, exist_ok=True)

    epochs = model_pars['epochs']
    batch_size = model_pars['batch_size']
    patience = model_pars['patience']
    max_grad_norm = model_pars.get('max_grad_norm', None)

    # Paths for model state dicts.
    ide_save_path = os.path.join(output_dir, 'identifier.pt')
    cls_save_path = os.path.join(output_dir, 'classifier.pt')
    # Records the last finished epoch and the best metrics so training can resume.
    epoch_save_path = os.path.join(output_dir, 'ide2cls_epoch_data.pt')

    ide_optimizer = torch.optim.AdamW(identifier.parameters(), lr=model_pars['rationale_identifier']['lr'],
                                      weight_decay=0.01)
    # Total optimizer steps = batches per epoch (ceil, to count a partial
    # final batch) * epochs; the scheduler expects an integer step count.
    ide_training_steps = epochs * ((len(ide_train_dataset) + batch_size - 1) // batch_size)
    ide_scheduler = get_linear_schedule_with_warmup(ide_optimizer,
                                                    num_warmup_steps=model_pars['rationale_identifier']['warmup_steps'],
                                                    num_training_steps=ide_training_steps)
    ide_criterion = nn.CrossEntropyLoss()

    cls_optimizer = torch.optim.AdamW(classifier.parameters(), lr=model_pars['rationale_classifier']['lr'],
                                      weight_decay=0.01)
    cls_criterion = nn.CrossEntropyLoss()

    # Per-epoch histories; the index of every list is the epoch number.
    cls_results = {
        'train_losses': [],
        'val_losses': [],
        'val_acc': [],
        'val_f1': [],
        'test_losses': [],
        'test_acc': [],
        'test_f1': [],
    }
    ide_results = {
        'train_losses': [],
        'val_losses': [],
        'val_pre': [],
        'val_recall': [],
        'val_f1': [],
        'test_losses': [],
        'test_pre': [],
        'test_recall': [],
        'test_f1': [],
    }
    joint_results = {
        'joint_losses': [],
        'joint_acc': []
    }

    start_epoch = 0
    best_epoch = -1
    best_model_state_dict = None
    best_cls_test_loss = float('inf')
    best_cls_test_f1 = 0
    best_cls_test_acc = 0
    best_joint_acc = 0
    min_delta = 0.0001  # minimum improvement required to count as progress
    epoch_data = {
        'epoch': None,
        'cls_results': None,
        'ide_results': None,
        'joint_results': None,
        'best_cls_test_loss': None,
        'best_cls_test_f1': None,
        'best_cls_test_acc': None,
        'best_joint_acc': None,
        'done': None,
    }

    # Resume from an existing checkpoint if one is present.
    if os.path.exists(epoch_save_path):
        identifier.load_state_dict(torch.load(ide_save_path))
        classifier.load_state_dict(torch.load(cls_save_path))
        epoch_data = torch.load(epoch_save_path)
        start_epoch = epoch_data['epoch'] + 1
        # Handle finishing because patience was exceeded or we didn't get the best final epoch.
        if bool(epoch_data.get('done', 0)):
            start_epoch = epochs
        best_epoch = start_epoch
        best_cls_test_loss = epoch_data['best_cls_test_loss']
        best_cls_test_f1 = epoch_data['best_cls_test_f1']
        best_cls_test_acc = epoch_data['best_cls_test_acc']
        best_joint_acc = epoch_data.get('best_joint_acc', 0)

        logger.info(f'MODEL ALREADY EXISTS, LOAD THE RESULT \n'
                    f'test loss: {best_cls_test_loss}\n '
                    f'test acc: {best_cls_test_acc}\n '
                    f'test f1: {best_cls_test_f1}')
    logger.info(f'Training rationale identifier from epoch {start_epoch} until epoch {epochs}')

    for epoch in range(start_epoch, epochs):
        ide_train_dataloader = DataLoader(ide_train_dataset, batch_size=batch_size, shuffle=True,
                                          collate_fn=collate_fn_ide2cls)
        ide_train_loss = 0
        cls_train_loss = 0
        classifier.train()
        identifier.train()

        # Instances harvested for the classifier's training set this epoch.
        train_doc_ids, train_queries, train_rationales, train_cls_labels = [], [], [], []

        # ---- train identifier ----
        logger.info('--------------------- identifier ---------------------')
        logger.info(f'EPOCH {epoch} TRAINING IDENTIFIER......')
        for ide_batch_inputs in tqdm(ide_train_dataloader):
            ide_inputs = (ide_batch_inputs[0].to(device), ide_batch_inputs[1].to(device))
            ide_labels = ide_batch_inputs[2].to(device)
            query_epoch = ide_batch_inputs[3]
            cls_label_epoch = ide_batch_inputs[4]  # eg: SUPPORTS
            doc_id_epoch = ide_batch_inputs[5]
            sentence_epoch = ide_batch_inputs[6]

            ide_logits = identifier(ide_inputs)
            ide_optimizer.zero_grad()
            ide_loss = ide_criterion(ide_logits, ide_labels).sum()
            ide_train_loss += ide_loss.item()
            ide_loss = ide_loss / len(ide_logits)
            ide_loss.backward()
            if max_grad_norm:
                nn.utils.clip_grad_norm_(identifier.parameters(), max_grad_norm)
            ide_optimizer.step()
            ide_scheduler.step()

            for i in range(len(ide_logits)):
                if ide_logits[i][1] > ide_logits[i][0]:
                    # Label 1 is the positive class, i.e. the sentence is a rationale.
                    train_doc_ids.append(doc_id_epoch[i])
                    train_queries.append(query_epoch[i])
                    train_rationales.append([sentence_epoch[i]])
                    train_cls_labels.append(cls_label_epoch[i])

        ide_train_loss /= len(ide_train_dataset)
        ide_results['train_losses'].append(ide_train_loss)
        logger.info(f'EPOCH {epoch} IDENTIFIER training loss {ide_train_loss}')
        ra_num = 0  # Count the number of correct rationale predictions in the identifier

        # ---- test identifier ----
        with torch.no_grad():
            logger.info(f'EPOCH: {epoch} TESTING IDENTIFIER.....')
            # Also returns the joint dataset built from test-time predictions.
            test_loss, _, test_hard_pred, test_truth, \
                joint_doc_ids, joint_queries, joint_rationales, joint_cls_labels = \
                    make_preds_epoch_joint_dataset(identifier, ide_test_dataset, batch_size, device, ide_criterion)
            test_pre, test_recall, test_f1, _ = precision_recall_fscore_support(test_truth, test_hard_pred,
                                                                                average='macro')

            for i in range(len(test_hard_pred)):
                if test_hard_pred[i] == test_truth[i] == 1:
                    ra_num += 1

            ide_results['test_losses'].append(test_loss)
            ide_results['test_pre'].append(test_pre)
            ide_results['test_recall'].append(test_recall)
            ide_results['test_f1'].append(test_f1)

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss} \n'
                f'precision: {test_pre}\n'
                f'recall: {test_recall} \n'
                f'f1: {test_f1}')

        # The joint evaluation set depends only on the identifier's test-time
        # predictions, so build it unconditionally. (Previously it was built
        # only when the harvested training set was non-empty, leaving
        # cls_joint_dataset undefined — a NameError — in the empty case.)
        joint_docs = doc_id_2_doc(base_dir, model_pars['dataset_name'], joint_doc_ids)
        cls_joint_instance_list = make_single_instance_for_classifier(model_pars['dataset_name'], joint_docs,
                                                                      joint_queries,
                                                                      joint_rationales,
                                                                      joint_cls_labels,
                                                                      model_pars['rationale_classifier']['classes'])
        cls_joint_dataset = ClassificationDataset(cls_joint_instance_list, tokenizer, model_pars['max_length'])

        logger.info('--------------------- classifier ---------------------')
        logger.info(f'EPOCH {epoch} TRAINING CLASSIFIER......')
        if len(train_doc_ids) != 0:
            train_docs = doc_id_2_doc(base_dir, model_pars['dataset_name'], train_doc_ids)
            cls_train_instance_list = make_single_instance_for_classifier(model_pars['dataset_name'], train_docs,
                                                                          train_queries,
                                                                          train_rationales,
                                                                          train_cls_labels,
                                                                          model_pars['rationale_classifier']['classes'])
            cls_train_dataset = ClassificationDataset(cls_train_instance_list, tokenizer, model_pars['max_length'])

            logger.info(f'Classifier Datasize:\n'
                        f'Train: {len(cls_train_dataset)}\n'
                        f'Test: {len(cls_test_dataset)}')

            cls_train_dataloader = DataLoader(cls_train_dataset, batch_size=batch_size, shuffle=True,
                                              collate_fn=collate_fn)

            for batch_inputs in tqdm(cls_train_dataloader):
                batch_inputs = tuple(t.to(device) for t in batch_inputs)
                inputs = (batch_inputs[0], batch_inputs[1])
                cls_labels = batch_inputs[2]
                cls_logits = classifier(inputs)

                cls_optimizer.zero_grad()
                cls_loss = cls_criterion(cls_logits, cls_labels).sum()
                cls_train_loss += cls_loss.item()
                cls_loss = cls_loss / len(cls_logits)
                cls_loss.backward()
                if max_grad_norm:
                    nn.utils.clip_grad_norm_(classifier.parameters(), max_grad_norm)
                cls_optimizer.step()
            cls_train_loss /= len(cls_train_dataset)

            cls_results['train_losses'].append(cls_train_loss)
            logger.info(f'EPOCH {epoch} CLASSIFIER training loss {cls_train_loss}')
        else:
            # No sentence was predicted as a rationale this epoch, so there is
            # nothing to train the classifier on. Record a zero training loss
            # to keep the per-epoch lists aligned. (Previously this branch
            # erroneously re-appended zeros to ide_results' test metrics,
            # double counting the epoch.)
            logger.info('train dataset is empty. Skip it')
            cls_results['train_losses'].append(0)

        # ---- test classifier ----
        with torch.no_grad():
            logger.info(f'EPOCH: {epoch} TESTING CLASSIFIER.....')
            test_loss, _, test_hard_pred, test_truth = \
                make_preds_epoch(classifier, cls_test_dataset, batch_size, device, cls_criterion)

            test_acc = accuracy_score(test_truth, test_hard_pred)
            test_f1 = f1_score(test_truth, test_hard_pred, average='macro', zero_division=0)

            cls_results['test_losses'].append(test_loss)
            cls_results['test_acc'].append(test_acc)
            cls_results['test_f1'].append(test_f1)

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss}, \n'
                f'acc: {test_acc},\n'
                f'f1: {test_f1}')

            logger.info(f'EPOCH: {epoch} TESTING JOINT DATASET.....')
            joint_loss, _, joint_hard_pred, joint_truth = \
                make_preds_epoch(classifier, cls_joint_dataset, batch_size, device, cls_criterion)

            correct = 0
            for i in tqdm(range(len(joint_hard_pred))):
                if joint_hard_pred[i] == joint_truth[i]:
                    correct += 1

            # Joint accuracy is measured against the number of correctly
            # identified rationales; guard against division by zero when the
            # identifier found none.
            joint_acc = correct / ra_num if ra_num else 0

            joint_results['joint_losses'].append(joint_loss)
            joint_results['joint_acc'].append(joint_acc)
            logger.info(
                f'Epoch {epoch}\n'
                f'----------------JOINT----------------\n'
                f'loss {joint_loss}\n'
                f'acc: {joint_acc}')

            # A new best model: any tracked metric improved by more than min_delta.
            if (test_loss < best_cls_test_loss - min_delta or
                    cls_results["test_f1"][-1] > best_cls_test_f1 + min_delta or
                    cls_results["test_acc"][-1] > best_cls_test_acc + min_delta or
                    joint_results["joint_acc"][-1] > best_joint_acc + min_delta):
                logger.info(
                    f'EPOCH {epoch} NEW BEST MODEL\n'
                    f'acc: {cls_results["test_acc"][-1]}\n'
                    f'f1: {cls_results["test_f1"][-1]}\n'
                    f'joint_acc: {joint_results["joint_acc"][-1]}')
                best_epoch = epoch
                if test_loss < best_cls_test_loss - min_delta:
                    best_cls_test_loss = test_loss
                if cls_results["test_f1"][-1] > best_cls_test_f1 + min_delta:
                    best_cls_test_f1 = cls_results["test_f1"][-1]
                if cls_results["test_acc"][-1] > best_cls_test_acc + min_delta:
                    best_cls_test_acc = cls_results["test_acc"][-1]
                if joint_results["joint_acc"][-1] > best_joint_acc + min_delta:
                    best_joint_acc = joint_results["joint_acc"][-1]
                torch.save(classifier.state_dict(), cls_save_path)
                torch.save(identifier.state_dict(), ide_save_path)

                epoch_data['epoch'] = epoch
                epoch_data['cls_results'] = cls_results
                epoch_data['ide_results'] = ide_results
                epoch_data['joint_results'] = joint_results
                epoch_data['best_cls_test_loss'] = best_cls_test_loss
                epoch_data['best_cls_test_f1'] = best_cls_test_f1
                epoch_data['best_cls_test_acc'] = best_cls_test_acc
                epoch_data['best_joint_acc'] = best_joint_acc
                epoch_data['done'] = 0

                torch.save(epoch_data, epoch_save_path)
                # default=float: sklearn metrics are numpy scalars, which
                # json cannot serialize natively.
                res = json.dumps(epoch_data, indent=2, default=float)
                epoch_data_path = os.path.join(output_dir, model_pars['save_json_name'])
                with open(epoch_data_path, 'w') as f:
                    f.write(res)

        # Early stopping: no improvement for more than `patience` epochs.
        if epoch - best_epoch > patience:
            epoch_data['done'] = 1
            torch.save(epoch_data, epoch_save_path)
            break

    # Persist the final bookkeeping whether training ended by early stopping
    # or by exhausting all epochs.
    epoch_data['epoch'] = epochs - 1
    epoch_data['best_cls_test_loss'] = best_cls_test_loss
    epoch_data['best_cls_test_f1'] = best_cls_test_f1
    epoch_data['best_cls_test_acc'] = best_cls_test_acc
    epoch_data['best_joint_acc'] = best_joint_acc
    epoch_data['done'] = 1
    epoch_data['cls_results'] = cls_results
    epoch_data['ide_results'] = ide_results
    epoch_data['joint_results'] = joint_results
    torch.save(epoch_data, epoch_save_path)

    # default=float: see note above on numpy scalar serialization.
    res = json.dumps(epoch_data, indent=2, default=float)
    epoch_data_path = os.path.join(output_dir, model_pars['save_json_name'])
    with open(epoch_data_path, 'w') as f:
        f.write(res)

    return None


if __name__ == '__main__':
    # Fix the RNG state on CPU and every GPU for reproducible runs.
    seed = 106
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    path = '/gemini/code/RationaleLoop/params/movies_ide_2_cls.json'
    logger.info(f'Loading model parameters from {path}')
    with open(path, 'r') as fp:
        model_params = json.load(fp)
    # Echo the full configuration (pretty-printed, two-space indent) before training.
    logger.info(f'Params: {json.dumps(model_params, indent=2)}')
    train_ide_2_cls(model_params)