import os.path
import json
import logging
import sys
import torch.nn as nn
import torch.optim.optimizer

from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score, precision_recall_fscore_support
from transformers import get_linear_schedule_with_warmup, BertTokenizer
from torch.utils.data import DataLoader

from model.BertClassifier import BertClassifier
from model.MultitaskBERT import MultitaskBERT
from datasets.loop_marker_dataset import collate_fn, ClassificationDataset
from utils.classifier_data_util import make_cls_dataset, make_single_instance_for_classifier
from utils.identifier_data_util import make_ide_train_dataset
from utils.joint_data_util import doc_id_2_doc, make_ide_test_dataset
from utils.pipeline_utils import make_preds_epoch, make_preds_epoch_joint_dataset, make_preds_epoch_joint_dataset_decode

# Module-level logging: INFO and above, routed to stdout.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)

# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def inference(inference_parse: dict,
              model_pars: dict):
    """Run test-set inference with a trained MultitaskBERT checkpoint.

    Builds the classifier and identifier test datasets, evaluates the
    rationale-identifier head (macro precision / recall / F1), and logs a
    JSON summary. The classifier and joint evaluation passes are currently
    disabled — see the commented-out section below.

    Args:
        inference_parse: run configuration; expects keys 'model_name',
            'model_dir' (pretrained BERT directory), 'saved_model'
            (state-dict path) and 'save_json_name'.
        model_pars: model/data hyper-parameters from the params JSON;
            expects 'base_dir', 'dataset_name', 'max_length', 'batch_size',
            'dropout_prob', 'output_dir', 'output_name' and
            'rationale_classifier' -> 'classes'.
    """
    model_name = inference_parse['model_name']
    model_dir = inference_parse['model_dir']
    saved_model = inference_parse['saved_model']
    base_dir = model_pars['base_dir']
    labels = model_pars['rationale_classifier']['classes']
    # Dataset-construction flags; presumably must match how the checkpoint
    # was trained — TODO confirm against the training script.
    loop = True
    marker = True

    multi_model = MultitaskBERT(model_dir, len(labels), dropout_prob=model_pars['dropout_prob']).to(device)
    tokenizer = BertTokenizer.from_pretrained(model_dir)
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    multi_model.load_state_dict(torch.load(saved_model, map_location=device))
    # Inference mode: disable dropout so predictions are deterministic.
    multi_model.eval()

    dataset_name = model_pars['dataset_name']
    logger.info(f'DATASET: {dataset_name}')

    cls_test_dataset = make_cls_dataset(model_dir, base_dir,
                                        dataset_name, 'test',
                                        model_pars['max_length'],
                                        labels, loop, marker)

    ide_test_dataset = make_ide_test_dataset(model_dir, base_dir,
                                             dataset_name, 'test',
                                             model_pars['max_length'], loop, marker)
    logger.info(f'IDENTIFIER Dataset: \n'
                f'test: {len(ide_test_dataset)}\n'
                f'CLASSIFIER Dataset: \n'
                f'test: {len(cls_test_dataset)}')

    output_dir = os.path.join(model_pars['output_dir'], dataset_name, model_pars['output_name'])
    os.makedirs(output_dir, exist_ok=True)

    batch_size = model_pars['batch_size']
    ide_criterion = nn.CrossEntropyLoss()
    cls_criterion = nn.CrossEntropyLoss()
    # Result skeletons; fields stay None for the disabled evaluation passes.
    cls_results = {
        'test_loss': None,
        'test_acc': None,
        'test_f1': None
    }
    ide_results = {
        'test_loss': None,
        'test_pre': None,
        'test_recall': None,
        'test_f1': None
    }
    joint_results = {
        'joint_loss': None,
        'joint_acc': None
    }
    results = {
        'model': model_name,
        'dataset': dataset_name,
        'cls_results': None,
        'ide_results': None,
        'joint_results': None,
    }
    with torch.no_grad():
        logger.info('--------------------- identifier ---------------------')
        test_loss, _, test_hard_pred, test_truth, \
            joint_doc_ids, joint_queries, joint_rationales, joint_cls_labels = \
                make_preds_epoch_joint_dataset_decode(multi_model, ide_test_dataset,
                                               batch_size, device, ide_criterion, 'ide')

        test_pre, test_recall, test_f1, _ = precision_recall_fscore_support(test_truth, test_hard_pred,
                                                                            average='macro')

        # Count gold-positive rationales that were also predicted positive;
        # used as the denominator of the (disabled) joint accuracy below.
        ra_num = sum(1 for pred, gold in zip(test_hard_pred, test_truth)
                     if pred == gold == 1)

        ide_results['test_loss'] = test_loss
        ide_results['test_pre'] = test_pre
        ide_results['test_recall'] = test_recall
        ide_results['test_f1'] = test_f1

        # NOTE(review): the classifier and joint passes below are disabled;
        # re-enable once the joint pipeline is needed again.
        # logger.info('--------------------- classifier ---------------------')
        # joint_docs = doc_id_2_doc(base_dir, model_pars['dataset_name'], joint_doc_ids)
        # cls_joint_instance_list = make_single_instance_for_classifier(model_pars['dataset_name'], joint_docs,
        #                                                               joint_queries,
        #                                                               joint_rationales,
        #                                                               joint_cls_labels,
        #                                                               model_pars['rationale_classifier']['classes'])
        # cls_joint_dataset = ClassificationDataset(cls_joint_instance_list, tokenizer,
        #                                           model_pars['max_length'], loop, marker)
        #
        # test_loss, _, test_hard_pred, test_truth = \
        #     make_preds_epoch(multi_model, cls_test_dataset, batch_size, device, cls_criterion, 'cls')
        #
        # test_acc = accuracy_score(test_truth, test_hard_pred)
        # test_f1 = f1_score(test_truth, test_hard_pred, average='macro', zero_division=0)
        # cls_results['test_loss'] = test_loss
        # cls_results['test_acc'] = test_acc
        # cls_results['test_f1'] = test_f1
        #
        # logger.info('--------------------- joint ---------------------')
        # joint_loss, _, joint_hard_pred, joint_truth = \
        #     make_preds_epoch(multi_model, cls_joint_dataset, batch_size, device, cls_criterion, 'cls')
        # correct = 0
        # for i in tqdm(range(len(joint_hard_pred))):
        #     if joint_hard_pred[i] == joint_truth[i]:
        #         correct += 1
        # joint_acc = correct / ra_num
        #
        # joint_results['joint_loss'] = joint_loss
        # joint_results['joint_acc'] = joint_acc
        #
        # results['cls_results'] = cls_results
        # results['ide_results'] = ide_results
        # results['joint_results'] = joint_results

        res = json.dumps(results, indent=2)
        # epoch_data_path = os.path.join(output_dir, inference_parse['save_json_name'])
        # with open(epoch_data_path, 'w') as f:
        #     f.write(res)
        logger.info(res)


if __name__ == '__main__':
    # Fix RNG seeds (CPU and every visible GPU) for reproducible runs.
    torch.manual_seed(106)
    torch.cuda.manual_seed_all(106)

    inference_pars = {
        'model_name': 'Multi_loop_marker_sample',
        'model_dir': '/gemini/data-1',
        'saved_model': '/gemini/code/RationaleLoop/output/multirc/multi_model.pt',
        'save_json_name': 'Multi_loop_marker_sample.json'
    }

    path = '/gemini/code/RationaleLoop/params/multirc_multibert.json'
    # Read the params file and close it before running inference, instead
    # of holding the file handle open for the whole (long) inference pass.
    with open(path, 'r') as fp:
        model_params = json.load(fp)
    inference(inference_pars, model_params)
