import json
import random
import logging
import sys
import os

from typing import List
from transformers import BertTokenizer
from torch.utils.data import DataLoader
from tqdm import tqdm

from datasets.loop_marker_dataset import RationaleTrainDataset, collate_fn
from utils.static_object import SentenceEvidence

logger = logging.getLogger(__name__)
# NOTE(review): configuring the root logger at import time is a side effect;
# fine while this module doubles as a script, but surprising for importers.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)


def read_data(base_path, dataset_name, split_data_name):
    """Load one ERASER-style split from disk.

    Reads ``<base_path>/<dataset_name>/<split_data_name>.jsonl`` (one JSON
    annotation per line) and, for every annotation whose document file exists
    under ``<base_path>/<dataset_name>/docs/``, collects the document text,
    the query, the evidence (rationale) texts, and the classification label.
    Annotations whose document file is missing are silently skipped.

    :param base_path: root directory holding one sub-directory per dataset
    :param dataset_name: dataset sub-directory name; also selects which field
        of each annotation carries the document id ('movies',
        'evidence_inference', or any other ERASER dataset)
    :param split_data_name: split file name without the '.jsonl' suffix
    :return: tuple ``(docs, queries, rationales, cla_labels)``, aligned by
        index; ``rationales[i]`` is the list of evidence texts for ``docs[i]``
    """
    doc_path = os.path.join(base_path, dataset_name, 'docs')
    filepath = os.path.join(base_path, dataset_name, split_data_name + '.jsonl')
    # 'with' guarantees the annotation file is closed even if a line below
    # fails to parse (the original left it open on any exception).
    with open(filepath, 'r', encoding='utf-8') as ra_file:
        ra_lines = ra_file.readlines()

    docs = []
    queries = []
    rationales = []
    cla_labels = []

    for line in tqdm(ra_lines):
        content = json.loads(line)
        query = content['query']
        cls_label = content['classification']
        # The document id lives in a different field depending on the dataset.
        if dataset_name == 'movies':
            doc_id = content['annotation_id']
        elif dataset_name == 'evidence_inference':
            doc_id = content['docids'][0]
        else:
            doc_id = content['evidences'][0][0]['docid']

        temp_path = os.path.join(doc_path, doc_id)
        if not os.path.isfile(temp_path):
            continue

        with open(temp_path, 'r') as f:
            # readlines() never yields '' or None, so no pre-filter is needed;
            # each raw line is stripped and the document re-joined on '\n'.
            doc = '\n'.join(ln.strip() for ln in f)
        docs.append(doc)
        queries.append(query)
        cla_labels.append(cls_label)

        ra_list = []
        for ev_group in content['evidences']:
            for ev in ev_group:
                # Skip empty evidence entries.
                if len(ev['text']) == 0:
                    continue
                ra_list.append(ev['text'])
        rationales.append(ra_list)

    return docs, queries, rationales, cla_labels


def make_single_instance_for_sentence_ide(dataset_name,
                                          docs, queries, rationales, cla_labels,
                                          sample: bool = False) -> List[SentenceEvidence]:
    """Flatten document-level annotations into one instance per sentence.

    Each rationale sentence becomes a positive instance (ra_label=1) and each
    non-rationale sentence a negative one (ra_label=0), paired with its
    neighbouring-sentence context, the query, and the document-level label.

    :param dataset_name: 'movies' rationales are substrings that must be
        located inside a document sentence; any other dataset's rationales are
        taken to be whole sentences
    :param docs: documents, sentences separated by '\\n'
    :param queries: one query per document
    :param rationales: per-document list of rationale texts
    :param cla_labels: final classification label of the document (NOT whether
        a given sentence is a rationale)
    :param sample: when True, keep pos : neg = 1 : 1 overall; a document with
        more positives than negatives records the shortfall, which is made up
        with extra negatives from later documents
    :return: list of SentenceEvidence instances
    """
    instance_list = []
    pos_num_total = 0
    neg_num_total = 0
    # Running shortfall of negatives (pos > neg within one document), spent on
    # surplus negatives of subsequent documents.
    difference_num = 0
    for i in range(len(cla_labels)):
        doc_sent = docs[i].split('\n')
        pos_num = 0
        neg_num = 0

        def get_context_mo(tokens, tag):
            """Find the first document sentence containing `tokens` and merge
            it with its neighbour(s); '' when no sentence contains it."""
            for sen in doc_sent:
                if tokens in sen:
                    try:
                        ra_idx = doc_sent.index(sen)
                        if len(doc_sent) > 1:
                            if ra_idx == 0:
                                return sen + doc_sent[ra_idx + 1]
                            if ra_idx == len(doc_sent) - 1:
                                return sen + doc_sent[ra_idx - 1]
                            # Middle sentence: context is the two neighbours.
                            return doc_sent[ra_idx - 1] + doc_sent[ra_idx + 1]
                        return sen
                    except ValueError:
                        logger.error(f'{tag}, {sen} is not in the list')
                        return ''
            # Fix: the original fell through and implicitly returned None here,
            # leaking a non-string context into SentenceEvidence.
            return ''

        def get_context_femu(sent, tag):
            """Return the neighbouring sentence(s) of `sent`; '' for
            single-character sentences or sentences not found in the doc."""
            if len(sent) == 1:
                return ''
            try:
                ra_idx = doc_sent.index(sent)
            except ValueError:
                logger.error(f'{tag}, {sent} is not in the list')
                return ''
            if len(doc_sent) == 1:
                return sent
            if ra_idx == 0:
                return doc_sent[ra_idx + 1]
            if ra_idx == len(doc_sent) - 1:
                return doc_sent[ra_idx - 1]
            return doc_sent[ra_idx - 1] + doc_sent[ra_idx + 1]

        def pick_context(sentence):
            # Single dispatch point (the original repeated this if/else four
            # times): movies rationales are substrings, others are sentences.
            if dataset_name == 'movies':
                return get_context_mo(sentence, 1)
            return get_context_femu(sentence, 1)

        # A document may carry several rationales; one positive instance each.
        for rationale in rationales[i]:
            instance_list.append(SentenceEvidence(
                context=pick_context(rationale),
                query=queries[i],
                rationale=rationale,
                cls_label=cla_labels[i],
                ra_label=1
            ))
            pos_num += 1

        non_rationale_sent = list(set(doc_sent).difference(set(rationales[i])))
        random.shuffle(non_rationale_sent)
        if sample:
            # First pass: one negative for each positive in this document.
            for sen in non_rationale_sent:
                if neg_num >= pos_num:
                    break
                instance_list.append(SentenceEvidence(
                    context=pick_context(sen),
                    query=queries[i],
                    rationale=sen,
                    cls_label=cla_labels[i],
                    ra_label=0
                ))
                neg_num += 1
            if neg_num < pos_num:
                difference_num += pos_num - neg_num
            # Second pass: spend any accumulated shortfall on the leftovers.
            for sen in non_rationale_sent[neg_num:]:
                if difference_num <= 0:
                    break
                instance_list.append(SentenceEvidence(
                    context=pick_context(sen),
                    query=queries[i],
                    rationale=sen,
                    cls_label=cla_labels[i],
                    ra_label=0
                ))
                difference_num -= 1
                neg_num += 1
        else:
            # No sampling: every non-rationale sentence becomes a negative.
            for sen in non_rationale_sent:
                instance_list.append(SentenceEvidence(
                    context=pick_context(sen),
                    query=queries[i],
                    rationale=sen,
                    cls_label=cla_labels[i],
                    ra_label=0
                ))
                neg_num += 1

        pos_num_total += pos_num
        neg_num_total += neg_num

    logger.info(
        'Total instance mount: %s, pos_num: %s, neg_num: %s' % (len(instance_list), pos_num_total, neg_num_total))
    return instance_list


# def make_dataset_for_sentence_ide(model_path: str,
#                                   base_path: str,
#                                   dataset_name: str,
#                                   split_dataset_name: str,
#                                   max_length: int,
#                                   sample: bool = False,
#                                   ) -> MarkerRationaleDataset:
#     tokenizer = BertTokenizer.from_pretrained(model_path)
#     docs, queries, rationales, cla_labels = read_data(base_path, dataset_name, split_dataset_name)
#     instance_list = make_single_instance_for_sentence_ide(dataset_name, docs, queries, rationales, cla_labels, sample)
#     return MarkerRationaleDataset(instance_list, tokenizer, max_length)


def make_ide_train_dataset(model_path: str,
                           base_path: str,
                           dataset_name: str,
                           split_dataset_name: str,
                           max_length: int,
                           sample: bool,
                           use_context: bool,
                           use_marker: bool
                           ) -> RationaleTrainDataset:
    """Build the rationale-identification training dataset for one split.

    Loads the split from disk, turns it into per-sentence instances
    (optionally balanced to pos : neg = 1 : 1 when `sample` is True), and
    wraps everything in a RationaleTrainDataset backed by a BERT tokenizer
    loaded from `model_path`.
    """
    docs, queries, rationales, cla_labels = read_data(
        base_path, dataset_name, split_dataset_name)
    instances = make_single_instance_for_sentence_ide(
        dataset_name, docs, queries, rationales, cla_labels, sample)
    tokenizer = BertTokenizer.from_pretrained(model_path)
    return RationaleTrainDataset(instances, tokenizer, max_length,
                                 use_context, use_marker)


if __name__ == '__main__':
    # Smoke test: for every (dataset, split) pair, build the dataset and
    # print a single batch to eyeball the collated structure.
    tokenizer = BertTokenizer.from_pretrained('/gemini/code/RationaleLoop/save')
    splits = ['test', 'train']
    dataset_names = ['fever', 'movies', 'multirc']
    base = '/gemini/code/RationaleLoop/data/data/'
    for ds_name in dataset_names:
        for split in splits:
            logger.info(f'{ds_name} {split}-------------------------------------------------')

            docs, queries, rationales, cla_labels = read_data(base, ds_name, split)
            instance_list = make_single_instance_for_sentence_ide(
                ds_name, docs, queries, rationales, cla_labels, True)

            dataset = RationaleTrainDataset(instance_list, tokenizer, 512, True, True)
            loader = DataLoader(dataset, batch_size=3, shuffle=False,
                                collate_fn=collate_fn)
            # Inspect only the first batch.
            for batch in loader:
                print(type(batch))
                print(batch)
                break
            # Release the heavy objects before moving on to the next split.
            del loader
            del dataset

    # Observed instance counts for reference:
    # True: INFO:__main__:Total instance mount: 291466, pos_num: 143310, neg_num: 145733
    # True: INFO:__main__:Total instance mount: 291532, pos_num: 145766, neg_num: 145766
    # False: INFO:__main__:Total instance mount: 1210182, pos_num: 145733, neg_num: 1064449
