import json
# import args
import torch
import pickle
from tqdm import tqdm


def _extract_doc_tokens(source):
    """Return the cleaned most-related paragraph of each document in *source*.

    For every entry in ``source['documents']`` the most-related paragraph is
    taken and the leading title tokens (plus one separator token) are stripped;
    paragraphs whose remaining text is 4 characters or shorter are discarded.

    Args:
        source: one decoded JSON record with a ``documents`` list; each
            document needs ``segmented_title``, ``segmented_paragraphs`` and
            ``most_related_para`` keys.

    Returns:
        list of ``{'doc_tokens': <str>}`` dicts, one per kept paragraph.
    """
    doc_tokens = []
    for doc in source['documents']:
        # The selected paragraph starts with the title tokens followed by a
        # separator token; skip past them to keep only the body text.
        ques_len = len(doc['segmented_title']) + 1
        clean_doc = "".join(doc['segmented_paragraphs'][doc['most_related_para']][ques_len:])
        # Drop near-empty paragraphs (4 characters or fewer).
        if len(clean_doc) > 4:
            doc_tokens.append({'doc_tokens': clean_doc})
    return doc_tokens


def creat_examples(filename_1, filename_2=None, result='./data/predict_examples.pkl'):
    """Build prediction examples (with answers) and pickle them to *result*.

    Reads one or two DuReader-style JSON-lines files, converts each record
    into an example dict and dumps the combined list with ``pickle``.

    Args:
        filename_1: path to the first JSON-lines input file, or None to skip.
        filename_2: optional path to a second JSON-lines input file.
        result: output path for the pickled list of examples.
    """
    examples = []
    # Both input files share the same record format, so process them with
    # one loop instead of two copy-pasted branches.
    for filename in (filename_1, filename_2):
        if filename is None:
            continue
        with open(filename, 'r', encoding='utf-8') as f:
            for line in tqdm(f.readlines()):
                source = json.loads(line.strip())
                # Skip records with no documents at all.  The original code
                # only guarded the second file; apply the check uniformly.
                if len(source['documents']) == 0:
                    print("error")
                    continue
                examples.append({
                    'id': source['question_id'],
                    'question_text': source['question'].strip(),
                    'question_type': source['question_type'],
                    'doc_tokens': _extract_doc_tokens(source),
                    'answers': source['answers']})
    print("{} questions in total".format(len(examples)))
    with open(result, 'wb') as fw:
        pickle.dump(examples, fw)


def creat_test_examples(filename_1):
    """Build prediction examples usable by BERT; no ``answers`` field needed.

    Args:
        filename_1: path to a DuReader-style JSON-lines input file.

    Returns:
        list of example dicts with ``id``, ``question_text``,
        ``question_type`` and ``doc_tokens`` keys.
    """
    examples = []
    with open(filename_1, 'r', encoding='utf-8') as f:
        for line in tqdm(f.readlines()):
            source = json.loads(line.strip())
            doc_tokens = []
            for doc in source['documents']:
                # Strip the leading title tokens (plus one separator) from
                # the most-related paragraph, keeping only the body text.
                ques_len = len(doc['segmented_title']) + 1
                clean_doc = "".join(doc['segmented_paragraphs'][doc['most_related_para']][ques_len:])
                # Drop near-empty paragraphs (4 characters or fewer).
                if len(clean_doc) > 4:
                    doc_tokens.append({'doc_tokens': clean_doc})
            examples.append({
                'id': source['question_id'],
                'question_text': source['question'].strip(),
                'question_type': source['question_type'],
                'doc_tokens': doc_tokens})
    # Bug fix: the original built `examples` and then discarded it (no
    # return, no dump), making the function a no-op for callers.
    return examples


if __name__ == "__main__":
    # 创建predict文件   --- 这之前是否需要先进行信息抽取  para_extraction ？ 可做实验验证

    style = 'search'

    dev_zhidao_input_file = f"E:\du_reader_epoch_1\pre_pare_data_raw\extr_{style}.dev.json"
    # dev_zhidao_input_file = r"E:\du_reader_epoch_1\pre_pare_data_raw\zhidao.dev.json"
    predict_example_files = f"E:\du_reader_epoch_1\pre_pare_data_raw\predict_file_extr_{style}.dev.pkl"

    creat_examples(filename_1=dev_zhidao_input_file,
                   result=predict_example_files)

    # creat_test_examples(filename_1=r"E:\du_reader_epoch_1\pre_pare_data_raw\extr_search.test.json")
