import json
import logging
import os.path
import sys

import torch
import torch.nn as nn
import torch.optim.optimizer

from sklearn.metrics import precision_recall_fscore_support

from model.BertClassifier import BertClassifier
from utils.joint_data_util import make_ide_test_dataset
from utils.pipeline_utils import make_preds_epoch_joint_dataset_decode

logger = logging.getLogger(__name__, )
logging.basicConfig(level=logging.INFO, stream=sys.stdout)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def inference(inference_parse: dict,
              model_pars: dict):
    """Run the identifier model over the test split, log metrics, and persist them.

    Args:
        inference_parse: run configuration with keys 'model_name', 'model_dir'
            (pretrained BERT location), 'saved_model' (state-dict checkpoint
            path) and 'decode_path' (where decode output is written).
        model_pars: model/dataset hyper-parameters with keys 'base_dir',
            'dropout_prob', 'dataset_name', 'max_length', 'output_dir',
            'output_name' and 'batch_size'.
    """
    model_name = inference_parse['model_name']
    model_dir = inference_parse['model_dir']
    saved_model = inference_parse['saved_model']
    base_dir = model_pars['base_dir']
    # Dataset flags: plain (non-loop, unmarked) test data.
    loop = False
    marker = False
    decode_path = inference_parse['decode_path']

    ide_model = BertClassifier(model_dir, 2, dropout_prob=model_pars['dropout_prob']).to(device)
    # map_location keeps a GPU-trained checkpoint loadable on a CPU-only host.
    ide_model.load_state_dict(torch.load(saved_model, map_location=device))
    # Disable dropout for deterministic inference.
    ide_model.eval()

    dataset_name = model_pars['dataset_name']
    logger.info(f'DATASET: {dataset_name}')

    ide_test_dataset = make_ide_test_dataset(model_dir, base_dir,
                                             dataset_name, 'test',
                                             model_pars['max_length'], loop, marker)
    logger.info(f'IDENTIFIER Dataset: \n'
                f'test: {len(ide_test_dataset)}\n')

    output_dir = os.path.join(model_pars['output_dir'], dataset_name, model_pars['output_name'])
    os.makedirs(output_dir, exist_ok=True)

    batch_size = model_pars['batch_size']
    ide_criterion = nn.CrossEntropyLoss()
    ide_results = {
        'test_loss': None,
        'test_pre': None,
        'test_recall': None,
        'test_f1': None
    }
    results = {
        'model': model_name,
        'dataset': dataset_name,
        'ide_results': None,
    }
    with torch.no_grad():
        logger.info('--------------------- identifier ---------------------')
        test_loss, _, test_hard_pred, test_truth, \
            joint_doc_ids, joint_queries, joint_rationales, joint_cls_labels = \
                make_preds_epoch_joint_dataset_decode(ide_model, ide_test_dataset,
                                               batch_size, device, ide_criterion, task_name=None, decode_path=decode_path)

        # Macro-averaged precision / recall / F1 over the two classes.
        test_pre, test_recall, test_f1, _ = precision_recall_fscore_support(test_truth, test_hard_pred,
                                                                            average='macro')

        # Count true positives (both prediction and ground truth are class 1).
        ra_num = sum(1 for pred, truth in zip(test_hard_pred, test_truth)
                     if pred == truth == 1)
        logger.info(f'true positives (pred == truth == 1): {ra_num}')

        ide_results['test_loss'] = test_loss
        ide_results['test_pre'] = test_pre
        ide_results['test_recall'] = test_recall
        ide_results['test_f1'] = test_f1
        # Attach the metrics to the summary; without this the dumped JSON
        # always reported 'ide_results': None.
        results['ide_results'] = ide_results

        res = json.dumps(results, indent=2)
        logger.info(res)
        # Persist the summary into the output directory created above.
        with open(os.path.join(output_dir, 'inference_results.json'), 'w') as f:
            f.write(res)


if __name__ == '__main__':
    # Fixed seeds so repeated inference runs are reproducible.
    seed = 106
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    output_root = '/gemini/code/rationale-loop/RationaleLoop/output/movies/B2B_baseline'
    inference_pars = {
        'model_name': 'B2B_baseline',
        'model_dir': '/gemini/data-1',
        'saved_model': output_root + '/identifier.pt',
        'decode_path': output_root,
    }

    # Load the hyper-parameter file for the movies/BERT configuration,
    # then hand everything to the inference driver.
    params_path = '/gemini/code/rationale-loop/RationaleLoop/params/movies_bert.json'
    with open(params_path, 'r') as fp:
        model_params = json.load(fp)
    inference(inference_pars, model_params)
