import json
import logging
import os.path
import sys

import torch
import torch.nn as nn
import torch.optim.optimizer

from sklearn.metrics import accuracy_score, f1_score, precision_recall_fscore_support
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, get_linear_schedule_with_warmup

from model.BertClassifier import BertClassifier
from datasets.loop_marker_dataset import collate_fn, ClassificationDataset
from utils.classifier_data_util import make_cls_dataset, make_single_instance_for_classifier
from utils.identifier_data_util import make_ide_train_dataset
from utils.joint_data_util import doc_id_2_doc, make_ide_test_dataset
from utils.pipeline_utils import make_preds_epoch, make_preds_epoch_joint_dataset


# Route INFO-and-above log records to stdout for the whole process.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)

# Prefer the GPU when one is visible; otherwise run on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def train_baseline(model_pars: dict):
    """Train and evaluate the baseline rationale identifier + classifier pair.

    Each epoch:
      1. trains the rationale identifier (a binary BERT classifier) and
         evaluates it on the test split,
      2. trains the rationale classifier and evaluates it on the test split,
      3. builds a "joint" dataset from the rationales the identifier predicted
         on the test split and scores the classifier on it.

    Any best-so-far metric triggers a checkpoint of both model state dicts,
    and per-epoch bookkeeping is saved so an interrupted run resumes from the
    last completed epoch.  Early stopping fires once no tracked metric has
    improved for more than ``patience`` epochs.

    Args:
        model_pars: parameter dict loaded from the params JSON file — paths
            (``model_dir``, ``base_dir``, ``output_dir``), global
            hyper-parameters (``epochs``, ``batch_size``, ``patience``,
            ``max_length``, optional ``max_grad_norm``), and per-model
            sections ``rationale_identifier`` / ``rationale_classifier``
            (``lr``, ``warmup_steps``, and the classifier's ``classes``).

    Returns:
        None.  All artifacts are written under the computed ``output_dir``.
    """
    model_dir = model_pars['model_dir']
    base_dir = model_pars['base_dir']
    # Named ``labels`` in the original, which the per-batch loops below then
    # shadowed with the target tensor; renamed for clarity.
    cls_classes = model_pars['rationale_classifier']['classes']
    sample = bool(model_pars['sample'])
    context = bool(model_pars['context'])
    marker = bool(model_pars['marker'])

    # Identifier: binary rationale / non-rationale.  Classifier: task classes.
    identifier = BertClassifier(model_dir, num_labels=2, dropout_prob=model_pars['dropout_prob']).to(device)
    classifier = BertClassifier(model_dir, num_labels=len(cls_classes)).to(device)

    tokenizer = BertTokenizer.from_pretrained(model_dir)

    dataset_name = model_pars['dataset_name']
    logger.info(f'DATASET: {dataset_name}')

    cls_train_dataset, cls_test_dataset = [make_cls_dataset(model_dir, base_dir,
                                                            dataset_name, split,
                                                            model_pars['max_length'],
                                                            cls_classes, context, marker)
                                           for split in ['train', 'test']]

    ide_train_dataset = make_ide_train_dataset(model_dir, base_dir, dataset_name, 'train',
                                               model_pars['max_length'], sample, context, marker)

    ide_test_dataset = make_ide_test_dataset(model_dir, base_dir,
                                             dataset_name, 'test',
                                             model_pars['max_length'], context, marker)

    logger.info(f'IDENTIFIER Dataset: \n'
                f'train: {len(ide_train_dataset)}; test: {len(ide_test_dataset)}\n'
                f'CLASSIFIER Dataset: \n'
                f'train: {len(cls_train_dataset)}; test: {len(cls_test_dataset)}')

    output_dir = os.path.join(model_pars['output_dir'], dataset_name, model_pars['output_name'])
    os.makedirs(output_dir, exist_ok=True)

    epochs = model_pars['epochs']
    batch_size = model_pars['batch_size']
    patience = model_pars['patience']
    max_grad_norm = model_pars.get('max_grad_norm', None)

    # Checkpoint paths: best model state dicts + per-epoch resume bookkeeping.
    ide_save_path = os.path.join(output_dir, 'identifier.pt')
    cls_save_path = os.path.join(output_dir, 'classifier.pt')
    epoch_save_path = os.path.join(output_dir, 'baseline_epoch_data.pt')

    # The scheduler horizon is measured in optimizer steps and the DataLoader
    # yields ceil(len/batch) batches per epoch.  The original computed a float
    # (epochs * len / batch) that also ignored the final partial batch.
    ide_training_steps = epochs * ((len(ide_train_dataset) + batch_size - 1) // batch_size)
    ide_optimizer = torch.optim.AdamW(identifier.parameters(), lr=model_pars['rationale_identifier']['lr'],
                                      weight_decay=0.01)
    ide_scheduler = get_linear_schedule_with_warmup(ide_optimizer,
                                                    num_warmup_steps=model_pars['rationale_identifier']['warmup_steps'],
                                                    num_training_steps=ide_training_steps)
    ide_criterion = nn.CrossEntropyLoss()

    cls_training_steps = epochs * ((len(cls_train_dataset) + batch_size - 1) // batch_size)
    cls_optimizer = torch.optim.AdamW(classifier.parameters(), lr=model_pars['rationale_classifier']['lr'],
                                      weight_decay=0.01)
    cls_scheduler = get_linear_schedule_with_warmup(cls_optimizer,
                                                    num_warmup_steps=model_pars['rationale_classifier']['warmup_steps'],
                                                    num_training_steps=cls_training_steps)
    cls_criterion = nn.CrossEntropyLoss()

    # Per-epoch metric histories; serialized to JSON after every epoch.
    # (The val_* slots are kept for JSON-schema compatibility even though this
    # baseline only evaluates on the test split.)
    cls_results = {
        'train_losses': [],
        'val_losses': [],
        'val_acc': [],
        'val_f1': [],
        'test_losses': [],
        'test_acc': [],
        'test_f1': [],
    }
    ide_results = {
        'train_losses': [],
        'val_losses': [],
        'val_pre': [],
        'val_recall': [],
        'val_f1': [],
        'test_losses': [],
        'test_pre': [],
        'test_recall': [],
        'test_f1': [],
    }
    joint_results = {
        'joint_losses': [],
        'joint_acc': []
    }

    start_epoch = 0
    best_epoch = -1
    best_ide_test_f1 = 0
    best_ide_test_recall = 0
    best_ide_test_pre = 0
    best_cls_test_f1 = 0
    best_cls_test_acc = 0
    best_joint_acc = 0
    min_delta = 0.0001  # minimum improvement that counts as progress
    epoch_data = {
        'epoch': None,
        'cls_results': None,
        'ide_results': None,
        'joint_results': None,
        'best_ide_test_f1': None,
        'best_ide_test_recall': None,
        'best_ide_test_pre': None,
        'best_cls_test_f1': None,
        'best_cls_test_acc': None,
        'best_joint_acc': None,
        'done': None,
    }
    # Resume: reload model weights and best-metric trackers from a previous run.
    if os.path.exists(epoch_save_path):
        identifier.load_state_dict(torch.load(ide_save_path))
        classifier.load_state_dict(torch.load(cls_save_path))
        epoch_data = torch.load(epoch_save_path)
        start_epoch = epoch_data['epoch'] + 1
        if bool(epoch_data.get('done', 0)):
            # Training finished (early stop); skip the loop entirely.
            start_epoch = epochs
        best_epoch = start_epoch
        best_ide_test_f1 = epoch_data['best_ide_test_f1']
        best_ide_test_recall = epoch_data['best_ide_test_recall']
        best_ide_test_pre = epoch_data['best_ide_test_pre']
        best_cls_test_f1 = epoch_data['best_cls_test_f1']
        best_cls_test_acc = epoch_data['best_cls_test_acc']
        best_joint_acc = epoch_data.get('best_joint_acc', 0)

        logger.info(f'MODEL ALREADY EXISTS, LOAD THE RESULT \n'
                    f'IDENTIFIER: \n'
                    f'ide_test_f1: {best_ide_test_f1}\n'
                    f'ide_test_recall: {best_ide_test_recall}\n'
                    f'ide_test_pre: {best_ide_test_pre}\n'
                    f'CLASSIFIER: \n'
                    f'cls_test_acc: {best_cls_test_acc}\n '
                    f'cls_test_f1: {best_cls_test_f1}\n'
                    f'JOINT acc: {best_joint_acc}')
    logger.info(f'Training rationale identifier from epoch {start_epoch} until epoch {epochs}')

    for epoch in range(start_epoch, epochs):
        ide_train_dataloader = DataLoader(ide_train_dataset, batch_size=batch_size, shuffle=True,
                                          collate_fn=collate_fn)
        cls_train_dataloader = DataLoader(cls_train_dataset, batch_size=batch_size, shuffle=True,
                                          collate_fn=collate_fn)

        ide_train_loss = 0
        cls_train_loss = 0
        classifier.train()
        identifier.train()

        logger.info('--------------------- identifier ---------------------')
        logger.info(f'EPOCH {epoch} TRAINING IDENTIFIER......')

        for batch_inputs in tqdm(ide_train_dataloader):
            batch_inputs = tuple(t.to(device) for t in batch_inputs)
            inputs = (batch_inputs[0], batch_inputs[1])
            batch_labels = batch_inputs[2]
            logits, _ = identifier(inputs)

            ide_optimizer.zero_grad()
            # CrossEntropyLoss already reduces to a scalar; .sum() is a no-op
            # kept for safety with non-default reductions.
            ide_loss = ide_criterion(logits, batch_labels).sum()
            ide_train_loss += ide_loss.item()
            ide_loss.backward()
            if max_grad_norm:
                nn.utils.clip_grad_norm_(identifier.parameters(), max_grad_norm)
            ide_optimizer.step()
            ide_scheduler.step()
        ide_train_loss /= len(ide_train_dataloader)
        ide_results['train_losses'].append(ide_train_loss)
        logger.info(f'EPOCH {epoch} IDENTIFIER training loss {ide_train_loss}')
        # True positives of the identifier this epoch; denominator of joint acc.
        ra_num = 0

        with torch.no_grad():
            logger.info(f'EPOCH: {epoch} TESTING IDENTIFIER.....')
            # The joint_* outputs describe the rationales the identifier
            # predicted on the test split; they seed the joint dataset below.
            test_loss, _, test_hard_pred, test_truth, \
            joint_doc_ids, joint_queries, joint_rationales, joint_cls_labels = \
                make_preds_epoch_joint_dataset(identifier, ide_test_dataset, batch_size, device, ide_criterion)

            test_pre, test_recall, test_f1, _ = precision_recall_fscore_support(test_truth, test_hard_pred,
                                                                                average='macro')

            for i in range(len(test_hard_pred)):
                if test_hard_pred[i] == test_truth[i] == 1:
                    ra_num += 1

            ide_results['test_losses'].append(test_loss[0])
            ide_results['test_pre'].append(test_pre)
            ide_results['test_recall'].append(test_recall)
            ide_results['test_f1'].append(test_f1)

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss[0]} \n'
                f'precision: {test_pre}\n'
                f'recall: {test_recall} \n'
                f'f1: {test_f1}')

        logger.info('--------------------- classifier ---------------------')
        logger.info(f'EPOCH {epoch} TRAINING CLASSIFIER......')

        for batch_inputs in tqdm(cls_train_dataloader):
            batch_inputs = tuple(t.to(device) for t in batch_inputs)
            inputs = (batch_inputs[0], batch_inputs[1])
            batch_labels = batch_inputs[2]
            logits, _ = classifier(inputs)

            cls_optimizer.zero_grad()
            cls_loss = cls_criterion(logits, batch_labels).sum()
            cls_train_loss += cls_loss.item()
            cls_loss.backward()
            if max_grad_norm:
                nn.utils.clip_grad_norm_(classifier.parameters(), max_grad_norm)
            cls_optimizer.step()
            cls_scheduler.step()
        cls_train_loss /= len(cls_train_dataloader)

        # Build the joint dataset: classifier instances constructed from the
        # rationales the identifier just predicted on the test split.
        joint_docs = doc_id_2_doc(base_dir, model_pars['dataset_name'], joint_doc_ids)
        cls_joint_instance_list = make_single_instance_for_classifier(model_pars['dataset_name'], joint_docs,
                                                                      joint_queries,
                                                                      joint_rationales,
                                                                      joint_cls_labels,
                                                                      model_pars['rationale_classifier']['classes'])
        cls_joint_dataset = ClassificationDataset(cls_joint_instance_list, tokenizer,
                                                  model_pars['max_length'], context, marker)

        cls_results['train_losses'].append(cls_train_loss)
        logger.info(f'EPOCH {epoch} CLASSIFIER training loss {cls_train_loss}')

        with torch.no_grad():
            logger.info(f'EPOCH: {epoch} TESTING CLASSIFIER.....')
            test_loss, _, test_hard_pred, test_truth = \
                make_preds_epoch(classifier, cls_test_dataset, batch_size, device, cls_criterion)

            test_acc = accuracy_score(test_truth, test_hard_pred)
            test_f1 = f1_score(test_truth, test_hard_pred, average='macro', zero_division=0)

            cls_results['test_losses'].append(test_loss[0])
            cls_results['test_acc'].append(test_acc)
            cls_results['test_f1'].append(test_f1)

            logger.info(
                f'Epoch {epoch}\n'
                f'----------------TEST----------------\n'
                f'loss {test_loss[0]}, \n'
                f'acc: {test_acc},\n'
                f'f1: {test_f1}')

            logger.info(f'EPOCH: {epoch} TESTING JOINT DATASET.....')
            if len(cls_joint_dataset) != 0:
                joint_loss, _, joint_hard_pred, joint_truth = \
                    make_preds_epoch(classifier, cls_joint_dataset, batch_size, device, cls_criterion)

                correct = 0
                for i in tqdm(range(len(joint_hard_pred))):
                    if joint_hard_pred[i] == joint_truth[i]:
                        correct += 1

                # Joint accuracy is normalized by the identifier's true
                # positives.  Guard against ra_num == 0 (no rationale correctly
                # identified): the original crashed with ZeroDivisionError here.
                joint_acc = correct / ra_num if ra_num else 0

                joint_results['joint_losses'].append(joint_loss[0])
                joint_results['joint_acc'].append(joint_acc)
                logger.info(
                    f'Epoch {epoch}\n'
                    f'----------------JOINT----------------\n'
                    f'loss {joint_loss[0]}\n'
                    f'acc: {joint_acc}')
            else:
                # Sentinels keep list lengths aligned with the epoch index.
                joint_results['joint_losses'].append(-1)
                joint_results['joint_acc'].append(0)
                logger.info(f'length of cls joint dataset is 0, skip this time')

        # Checkpoint whenever ANY tracked metric improves by more than min_delta.
        best_flag = False
        if ide_results['test_pre'][-1] > best_ide_test_pre + min_delta:
            best_ide_test_pre = ide_results['test_pre'][-1]
            best_flag = True
        if ide_results['test_recall'][-1] > best_ide_test_recall + min_delta:
            best_ide_test_recall = ide_results['test_recall'][-1]
            best_flag = True
        if ide_results['test_f1'][-1] > best_ide_test_f1 + min_delta:
            best_ide_test_f1 = ide_results['test_f1'][-1]
            best_flag = True
        if cls_results['test_f1'][-1] > best_cls_test_f1 + min_delta:
            best_cls_test_f1 = cls_results['test_f1'][-1]
            best_flag = True
        if cls_results['test_acc'][-1] > best_cls_test_acc + min_delta:
            best_cls_test_acc = cls_results['test_acc'][-1]
            best_flag = True
        if joint_results["joint_acc"][-1] > best_joint_acc + min_delta:
            best_joint_acc = joint_results["joint_acc"][-1]
            best_flag = True
        if best_flag:
            logger.info(f'EPOCH {epoch} NEW BEST MODEL')
            torch.save(classifier.state_dict(), cls_save_path)
            torch.save(identifier.state_dict(), ide_save_path)
            best_epoch = epoch

        epoch_data['epoch'] = epoch
        epoch_data['cls_results'] = cls_results
        epoch_data['ide_results'] = ide_results
        epoch_data['joint_results'] = joint_results
        epoch_data['best_ide_test_pre'] = best_ide_test_pre
        epoch_data['best_ide_test_recall'] = best_ide_test_recall
        epoch_data['best_ide_test_f1'] = best_ide_test_f1
        epoch_data['best_cls_test_f1'] = best_cls_test_f1
        epoch_data['best_cls_test_acc'] = best_cls_test_acc
        epoch_data['best_joint_acc'] = best_joint_acc
        epoch_data['done'] = 0

        torch.save(epoch_data, epoch_save_path)
        # NOTE(review): assumes every stored metric is a plain Python/NumPy
        # scalar — json.dumps would fail on tensors; confirm make_preds_epoch's
        # loss type if this ever raises.
        res = json.dumps(epoch_data, indent=2)
        epoch_data_path = os.path.join(output_dir, model_pars['save_json_name'])
        with open(epoch_data_path, 'w') as f:
            f.write(res)

        # Early stopping: no metric has improved for more than `patience` epochs.
        if epoch - best_epoch > patience:
            epoch_data['done'] = 1
            torch.save(epoch_data, epoch_save_path)
            break

    return None


if __name__ == '__main__':
    # Fixed seed on CPU and every CUDA device so runs are reproducible.
    torch.manual_seed(106)
    torch.cuda.manual_seed_all(106)

    path = '/gemini/code/rationale-loop/RationaleLoop/params/movies_bert.json'
    with open(path, 'r') as config_file:
        logger.info(f'Loading model parameters from {path}')
        model_params = json.load(config_file)
        # indent=2 wraps and indents the dump so the logged params stay readable.
        logger.info(f'Params: {json.dumps(model_params, indent=2)}')
        train_baseline(model_params)
