import logging
import os
import sys
import json
import random

from typing import List
from transformers import BertTokenizer
from torch.utils.data import DataLoader
from tqdm import tqdm

from utils.static_object import SentenceEvidence_doc
from datasets.loop_marker_dataset import RationaleTestDataset

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, stream=sys.stdout)


def read_data(base_path, dataset_name, split_data_name):
    """Load one split of a rationale dataset together with its documents.

    :param base_path: root directory holding one sub-directory per dataset
    :param dataset_name: dataset sub-directory name (e.g. 'movies',
        'evidence_inference'); also selects where the document id is found
        inside each jsonl record
    :param split_data_name: split file name without extension ('train', ...)
    :return: tuple of parallel lists (doc_ids, docs, queries, rationales,
        cla_labels); records whose document file is missing on disk are
        silently skipped, so the lists may be shorter than the split file.
    """
    doc_path = os.path.join(base_path, dataset_name, 'docs')
    filepath = os.path.join(base_path, dataset_name, split_data_name + '.jsonl')

    docs = []
    queries = []
    rationales = []
    cla_labels = []
    doc_ids = []

    # 'with' guarantees the split file is closed even if parsing below raises
    # (previously the handle leaked on any exception before the final close()).
    with open(filepath, 'r', encoding='utf-8') as ra_file:
        ra_lines = ra_file.readlines()

    for line in tqdm(ra_lines):
        content = json.loads(line)
        query = content['query']
        cls_label = content['classification']

        # The document id lives in a dataset-specific place in the record.
        if dataset_name == 'movies':
            doc_id = content['annotation_id']
        elif dataset_name == 'evidence_inference':
            doc_id = content['docids'][0]
        else:
            doc_id = content['evidences'][0][0]['docid']

        temp_path = os.path.join(doc_path, doc_id)
        if not os.path.isfile(temp_path):
            # Best effort: skip records without a backing document file.
            continue

        # encoding made explicit so results do not depend on the platform's
        # locale default (consistent with the jsonl file above).
        with open(temp_path, 'r', encoding='utf-8') as f:
            doc_lines = [ln.strip() for ln in f.readlines() if ln]
            doc = '\n'.join(doc_lines)

        docs.append(doc)
        queries.append(query)
        cla_labels.append(cls_label)
        doc_ids.append(doc_id)

        # Flatten all non-empty evidence snippets of this record.
        ra_list = []
        for ev_group in content['evidences']:
            for ev in ev_group:
                if len(ev['text']) == 0:
                    continue
                ra_list.append(ev['text'])
        rationales.append(ra_list)

    return doc_ids, docs, queries, rationales, cla_labels


def make_single_instance_for_ide2cls(dataset_name, doc_ids,
                                     docs, queries, rationales, cla_labels) -> List[SentenceEvidence_doc]:
    """Turn document-level examples into sentence-level training instances.

    Every rationale sentence becomes a positive instance (ra_label=1) and
    every remaining sentence of the same document becomes a negative
    instance (ra_label=0). Each instance also carries the sentences
    neighbouring the rationale as context.

    :param dataset_name: picks the context strategy -- 'movies' matches the
        rationale as a substring of a document sentence, all other datasets
        expect the rationale to be an exact document sentence
    :param doc_ids: document id per example (parallel to the other lists)
    :param docs: document text per example, one sentence per line
    :param queries: query string per example
    :param rationales: list of rationale sentences per example
    :param cla_labels: classification label per example
    :return: flat list of SentenceEvidence_doc instances
    """
    instance_list = []
    pos_num_total = 0
    neg_num_total = 0
    for i in range(len(cla_labels)):
        doc_sent = docs[i].split('\n')
        pos_num = 0
        neg_num = 0

        # Context for 'movies': the rationale may be only a fragment, so find
        # the first document sentence containing it and return its
        # neighbours (edge sentences keep themselves in the context).
        def get_context_mo(tokens, tag):
            for sen in doc_sent:
                if tokens in sen:
                    sent = sen
                    try:
                        ra_idx = doc_sent.index(sent)
                        if len(doc_sent) > 1:
                            if ra_idx == 0:
                                context = sent + doc_sent[ra_idx + 1]
                            elif ra_idx == len(doc_sent) - 1:
                                context = sent + doc_sent[ra_idx - 1]
                            else:
                                context = doc_sent[ra_idx - 1] + doc_sent[ra_idx + 1]
                        else:
                            context = sent
                        return context
                    except ValueError:
                        # unreachable in practice: sent was taken from doc_sent
                        logger.error(f'{tag}, {sent} is not in the list')
                        return ''
            # Bug fix: previously this fell off the end of the loop and
            # implicitly returned None when no sentence contained the
            # rationale; return '' so callers always receive a string.
            return ''

        # Context for the remaining datasets: look the exact sentence up and
        # return its neighbours (a single-character "sentence" gets none).
        def get_context_femu(sent, tag):
            if len(sent) == 1:
                return ''
            try:
                ra_idx = doc_sent.index(sent)
                if len(doc_sent) > 1:
                    if ra_idx == 0:
                        context = doc_sent[ra_idx + 1]
                    elif ra_idx == len(doc_sent) - 1:
                        context = doc_sent[ra_idx - 1]
                    else:
                        context = doc_sent[ra_idx - 1] + doc_sent[ra_idx + 1]
                else:
                    context = sent
                return context
            except ValueError:
                logger.error(f'{tag}, {sent} is not in the list')
                return ''

        # A record may carry several rationales; each yields one positive.
        for j in rationales[i]:
            if dataset_name == 'movies':
                context = get_context_mo(j, 1)
            else:
                context = get_context_femu(j, 1)
            instance_list.append(SentenceEvidence_doc(
                doc_id=doc_ids[i],
                context=context,
                query=queries[i],
                rationale=j,
                cls_label=cla_labels[i],
                ra_label=1
            ))
            pos_num += 1

        # Every sentence that is not a rationale becomes a negative, in a
        # shuffled order (note: set() also drops duplicate sentences).
        non_rationale_sent = list(set(doc_sent).difference(set(rationales[i])))
        random.shuffle(non_rationale_sent)

        for j in non_rationale_sent:
            if dataset_name == 'movies':
                context = get_context_mo(j, 1)
            else:
                context = get_context_femu(j, 1)
            instance_list.append(SentenceEvidence_doc(
                doc_id=doc_ids[i],
                context=context,
                query=queries[i],
                rationale=j,
                cls_label=cla_labels[i],
                ra_label=0
            ))
            neg_num += 1

        pos_num_total += pos_num
        neg_num_total += neg_num

    logger.info(
        'Total instance mount: %s, pos_num: %s, neg_num: %s' % (len(instance_list), pos_num_total, neg_num_total))
    return instance_list


# def make_dataset_for_ide2cls(model_path: str,
#                              base_path: str,
#                              dataset_name: str,
#                              split_dataset_name: str,
#                              max_length: int,
#                              ) -> MarkerRationaleDataset_ide2cls:
#     tokenizer = BertTokenizer.from_pretrained(model_path)
#     doc_ids, docs, queries, rationales, cla_labels = read_data(base_path, dataset_name, split_dataset_name)
#     instance_list = make_single_instance_for_ide2cls(dataset_name, doc_ids, docs, queries, rationales, cla_labels)
#     return MarkerRationaleDataset_ide2cls(instance_list, tokenizer, max_length)


def make_ide_test_dataset(model_path: str,
                          base_path: str,
                          dataset_name: str,
                          split_dataset_name: str,
                          max_length: int,
                          use_context: bool,
                          use_marker: bool
                          ) -> RationaleTestDataset:
    """Assemble a RationaleTestDataset for the identification test stage.

    Reads the requested split from disk, converts it into sentence-level
    instances and wraps the result with a BERT tokenizer loaded from
    model_path.
    """
    split = read_data(base_path, dataset_name, split_dataset_name)
    instances = make_single_instance_for_ide2cls(dataset_name, *split)
    tokenizer = BertTokenizer.from_pretrained(model_path)
    return RationaleTestDataset(instances, tokenizer, max_length,
                                use_context, use_marker)


def doc_id_2_doc(base_path, dataset_name, doc_ids):
    """Load the raw text of each document id from the dataset's docs folder.

    :param base_path: root directory holding one sub-directory per dataset
    :param dataset_name: dataset sub-directory under base_path
    :param doc_ids: iterable of document file names to load
    :return: list of document strings (stripped lines joined by newlines);
        ids whose file does not exist are silently skipped, so the result
        may be shorter than doc_ids.
    """
    doc_path = os.path.join(base_path, dataset_name, 'docs')
    docs = []
    for doc_id in doc_ids:
        temp_path = os.path.join(doc_path, doc_id)
        if not os.path.isfile(temp_path):
            # Best effort: skip ids without a backing file (matches read_data).
            continue
        # encoding made explicit so results do not depend on the platform's
        # locale default (consistent with read_data's jsonl handling).
        with open(temp_path, 'r', encoding='utf-8') as f:
            lines = [line.strip() for line in f.readlines() if line]
        docs.append('\n'.join(lines))
    return docs


# if __name__ == '__main__':
#     tokenizer = BertTokenizer.from_pretrained('/gemini/data-1')
#     a = ['test', 'train', 'val']
#     # a = ['train1']
#     b = ['fever', 'movies', 'multirc']
#     # b = ['fever']
#     base = '/gemini/code/RationaleLoop/data/data/'
#     for i in b:
#         for j in a:
#             logger.info(f'{i} {j}-------------------------------------------------')
#
#             dataset = make_dataset_for_ide2cls('/gemini/data-1', base, i, j, 512)
#
#             loader = DataLoader(dataset, batch_size=10, shuffle=False,
#                                 collate_fn=collate_fn_ide2cls)
#
#             for batch in loader:
#                 print(type(batch))
#                 print(batch)
#                 break
#             del loader
#             del dataset
