import os.path
import json
import torch.optim.optimizer
from transformers import BertTokenizer
from transformers import get_linear_schedule_with_warmup
from model.BertClassifier import BertClassifier
import torch.nn as nn
from utils.dataset import collate_fn_ide2cls, ClassificationDataset, collate_fn
from utils.joint_data_util import make_dataset_for_ide2cls, doc_id_2_doc
from utils.classifier_data_util import make_single_instance_for_classifier, make_dataset_for_sentence_cls
from torch.utils.data import DataLoader
import logging
import sys
from collections import OrderedDict
from tqdm import tqdm
from utils.pipeline_utils import make_preds_epoch_ide2cls, make_preds_epoch, make_preds_epoch_joint_dataset
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, f1_score

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)

# Hard-coded path to the experiment's hyper-parameter JSON for the
# movies "identify -> classify" pipeline. NOTE(review): consider making
# this a CLI argument instead of an absolute path.
path = '/gemini/code/RationaleLoop/params/movies_ide_2_cls.json'
with open(path, 'r') as fp:
    model_pars = json.load(fp)
# Task label set for the rationale classifier, read from the config.
labels = model_pars['rationale_classifier']['classes']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Identifier head: 2-way classifier (presumably rationale vs. non-rationale
# sentence — num_labels=2; confirm against BertClassifier usage).
identifier = BertClassifier(model_dir=model_pars['model_dir'], num_labels=2).to(device)
# Classifier head: predicts one of `labels` for a (query, rationale) input.
classifier = BertClassifier(model_dir=model_pars['model_dir'], num_labels=len(labels)).to(device)
tokenizer = BertTokenizer.from_pretrained(model_pars['model_dir'])
# Restore fine-tuned weights saved by the training run for both heads.
identifier.load_state_dict(torch.load('/gemini/code/RationaleLoop/output/movies/ide_2_cls/identifier.pt'))
classifier.load_state_dict(torch.load('/gemini/code/RationaleLoop/output/movies/ide_2_cls/classifier.pt'))

# Test split for the identifier (sentence-level rationale identification).
ide_test_dataset = make_dataset_for_ide2cls(model_pars['model_dir'], model_pars['base_dir'],
                                            model_pars['dataset_name'], 'test',
                                            model_pars['max_length'])


# Test split for the classifier, built from gold rationales with the task labels.
cls_test_dataset = make_dataset_for_sentence_cls(model_pars['model_dir'], model_pars['base_dir'],
                                                 model_pars['dataset_name'], 'test',
                                                 model_pars['max_length'], labels)

# Separate loss instances for the two heads (both plain cross-entropy).
ide_criterion = nn.CrossEntropyLoss()
cls_criterion = nn.CrossEntropyLoss()

# Inference-only evaluation: no_grad avoids building autograd graphs.
with torch.no_grad():
    logger.info(f'TESTING.....')

    # --- 1) Classifier evaluated on the gold-rationale test set -----------
    test_loss, _, test_hard_pred, test_truth = \
        make_preds_epoch(classifier, cls_test_dataset, 16, device, cls_criterion)

    test_acc = accuracy_score(test_truth, test_hard_pred)
    test_f1 = f1_score(test_truth, test_hard_pred, average='macro', zero_division=0)

    logger.info(
        f'----------------CLS----------------\n'
        f'loss {test_loss}, \n'
        f'acc: {test_acc},\n'
        f'f1: {test_f1}')

    # --- 2) Identifier evaluated on the test set; it also returns the -----
    # --- material needed to build a downstream classification dataset -----
    ide_test_loss, _, ide_test_hard_pred, ide_test_truth, \
        joint_doc_ids, joint_queries, joint_rationales, joint_cls_labels = \
            make_preds_epoch_joint_dataset(identifier, ide_test_dataset, 16, device, ide_criterion)

    # Resolve document ids to documents and assemble classifier instances
    # from the identifier-extracted rationales.
    joint_docs = doc_id_2_doc(model_pars['base_dir'], model_pars['dataset_name'], joint_doc_ids)
    cls_joint_instance_list = make_single_instance_for_classifier(model_pars['dataset_name'], joint_docs,
                                                                  joint_queries,
                                                                  joint_rationales,
                                                                  joint_cls_labels,
                                                                  model_pars['rationale_classifier']['classes'])
    cls_joint_dataset = ClassificationDataset(cls_joint_instance_list, tokenizer, model_pars['max_length'])

    # --- 3) Classifier evaluated on the identifier's rationales (joint) ---
    joint_loss, _, joint_hard_pred, joint_truth = \
        make_preds_epoch(classifier, cls_joint_dataset, 16, device, cls_criterion)

    # Correct end-task predictions on the joint dataset. A plain sum over
    # zip replaces the original tqdm-wrapped index loops: progress bars add
    # nothing for an O(n) comparison count.
    correct = sum(int(p == t) for p, t in zip(joint_hard_pred, joint_truth))

    # True positives of the identifier (pred == truth == 1).
    # NOTE(review): `ra` is used as the denominator below, so `joint_acc`
    # measures accuracy relative to correctly-identified rationales, while
    # `correct` is counted over the whole joint dataset — confirm this
    # ratio is the intended metric (it can exceed 1 if the counts diverge).
    ra = sum(int(p == t == 1) for p, t in zip(ide_test_hard_pred, ide_test_truth))

    # Guard against ZeroDivisionError when the identifier has no true
    # positives (original code crashed in that case).
    joint_acc = correct / ra if ra else 0.0

    logger.info(f'joint_dataset length: {ra}\n'
                f'correct: {correct}')

    logger.info(
        f'----------------JOINT----------------\n'
        f'loss {joint_loss}\n'
        f'acc: {joint_acc}')

