import logging
import os
import sys
import json

from typing import List
from transformers import BertTokenizer
from torch.utils.data import DataLoader

from utils.identifier_data_util import read_data
from utils.static_object import SentenceEvidence
from datasets.loop_marker_dataset import collate_fn, ClassificationDataset

# Module-level logger; basicConfig routes INFO-and-above records to stdout
# (side effect at import time — presumably intentional for this script-style
# module, since it also has a __main__ entry point).
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)


def make_single_instance_for_classifier(dataset_name,
                                        docs, queries, rationales, cla_labels,
                                        labels: List[str]) -> List[SentenceEvidence]:
    """Build one classification instance per rationale sentence.

        Keep the context as long as possible, with the total length not
        exceeding 512 (truncation itself happens downstream in the
        dataset/tokenizer). The context of a rationale is every document
        sentence except the rationale sentence itself.

    :param dataset_name: corpus name; 'movies' locates the rationale by
        substring match, every other corpus by exact sentence match.
    :param docs: documents, sentences separated by '\\n'; aligned with queries.
    :param queries: query string per document.
    :param rationales: per-document list of rationale sentences/snippets.
    :param cla_labels: per-document gold class name (string).
    :param labels: ordered class names, eg: ['SUPPORTS', 'REFUTES'];
        position in the list defines the integer label id.
    :return: flat list of SentenceEvidence instances (ra_label fixed to 1).
    """
    # Label mapping, e.g. "SUPPORTS": 0, "REFUTES": 1.
    # Hoisted out of the per-document loop: it never changes between docs.
    label_mapping = {name: idx for idx, name in enumerate(labels)}

    def _context_around(doc_sent, ra_idx):
        """All sentences except the one at ra_idx; '' if it is the only one."""
        if len(doc_sent) <= 1:
            return ''
        # BUG FIX: the original sliced doc_sent[:ra_idx - 1], which also
        # dropped the sentence *before* the rationale (off-by-one).
        return doc_sent[:ra_idx] + doc_sent[ra_idx + 1:]

    def _get_context_mo(doc_sent, tokens, tag):
        """Context for 'movies': the rationale is a substring of a sentence."""
        for idx, sen in enumerate(doc_sent):
            if tokens in sen:
                return _context_around(doc_sent, idx)
        # BUG FIX: the original fell off the loop and implicitly returned
        # None when no sentence matched; '' keeps the context type consistent.
        logger.error(f'{tag}, {tokens} is not in the list')
        return ''

    def _get_context_femu(doc_sent, sent, tag):
        """Context for fever/multirc: the rationale equals a sentence exactly."""
        if len(sent) == 1:
            # Preserved from the original: a single-character rationale gets
            # no context (presumably degenerate input — TODO confirm).
            return ''
        try:
            ra_idx = doc_sent.index(sent)
        except ValueError:
            logger.error(f'{tag}, {sent} is not in the list')
            return ''
        return _context_around(doc_sent, ra_idx)

    instance_list = []
    for i in range(len(cla_labels)):
        doc_sent = docs[i].split('\n')
        for rationale in rationales[i]:
            if dataset_name == 'movies':
                context = _get_context_mo(doc_sent, rationale, 1)
            else:
                context = _get_context_femu(doc_sent, rationale, 1)
            instance_list.append(SentenceEvidence(
                context=context,
                query=queries[i],
                rationale=rationale,
                cls_label=label_mapping[cla_labels[i]],
                ra_label=1
            ))
    return instance_list


def make_cls_dataset(model_path: str,
                     base_path: str,
                     dataset_name: str,
                     split_dataset_name: str,
                     max_length: int,
                     labels: List[str],
                     use_context: bool,
                     use_marker: bool
                     ) -> ClassificationDataset:
    """Load one dataset split and wrap it as a ClassificationDataset.

    Reads the raw split via read_data, turns every rationale into a
    SentenceEvidence instance, and hands the result to
    ClassificationDataset together with a BERT tokenizer loaded from
    model_path.
    """
    logger.info(f'SPLIT DATASET: {split_dataset_name}')
    tokenizer = BertTokenizer.from_pretrained(model_path)
    split = read_data(base_path, dataset_name, split_dataset_name)
    docs, queries, rationales, cla_labels = split
    instances = make_single_instance_for_classifier(
        dataset_name, docs, queries, rationales, cla_labels, labels)
    return ClassificationDataset(
        instances, tokenizer, max_length, use_context, use_marker)


if __name__ == '__main__':
    # Smoke test: build each dataset split and print the first batch.
    splits = ['test', 'train']
    dataset_names = ['movies', 'multirc', 'fever']
    base = '/gemini/code/RationaleLoop/data/data'
    base_param_path = '/gemini/code/RationaleLoop/params'

    for name in dataset_names:
        for split in splits:
            logger.info(f'{name} {split}-------------------------------------------------')
            # Class names (e.g. ['SUPPORTS', 'REFUTES']) come from the
            # per-dataset parameter file.
            param_path = os.path.join(base_param_path, name + '_bert.json')
            with open(param_path, 'r') as fp:
                logger.info(f'Loading model parameters from {param_path}')
                model_params = json.load(fp)
            labels = model_params['rationale_classifier']['classes']

            # BUG FIX: the original call passed only 6 of the 8 required
            # arguments of make_cls_dataset and raised TypeError.
            # NOTE(review): True/True assumed as the intended smoke-test
            # configuration — confirm against the training scripts.
            dataset = make_cls_dataset('/gemini/data-1', base, name, split,
                                       512, labels,
                                       use_context=True, use_marker=True)

            loader = DataLoader(dataset, batch_size=3, shuffle=False,
                                collate_fn=collate_fn)

            # Inspect only the first batch of each split.
            for batch in loader:
                print(type(batch))
                print(batch)
                break
            # Release the large objects before the next iteration.
            del loader
            del dataset
