import json
import random

from transformers import BertTokenizer


def _collect_examples(input_file, examples, is_training):
    """Parse one preprocessed DuReader JSON-lines file and append examples in place.

    Each line is a JSON record. A record is kept only when it has at least one
    document, a non-empty answer span and answer list, and a fake-answer match
    score of at least 0.8; otherwise it is skipped.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        for line in f:
            source = json.loads(line.strip())
            if len(source['documents']) == 0:
                continue
            # Skip records with no extractable answer.
            if (len(source['answer_spans']) == 0) or source['answers'] == []:
                continue
            # Keep only records whose fake answer closely matches a reference answer.
            if source['match_scores'][0] < 0.8:
                continue

            answer_index = source['best_answer_index'][0]
            docs_index = source['answer_docs'][0]
            question_type = source['question_type']

            try:
                # answer_docs may point outside the documents list, or the
                # document may lack 'most_related_para'; skip such records.
                answer_passage_idx = source['documents'][docs_index]['most_related_para']
            except (KeyError, IndexError, TypeError):
                continue

            doc_tokens = source['documents'][docs_index]['segmented_paragraphs'][answer_passage_idx]
            # Join the segmented paragraph back into one plain string.
            new_doc_tokens = "".join(doc_tokens)

            if is_training:
                # Span positions are currently unused by the generator model,
                # so start/end are fixed at 0 and can_answer is always 1.
                examples.append({
                    "qas_id": source['question_id'],
                    "question_text": source['question'].strip(),
                    "question_type": question_type,
                    "doc_tokens": new_doc_tokens.strip(),
                    "can_answer": 1,
                    "start_position": 0,
                    "end_position": 0,
                    "answer": source['answers'][answer_index].strip()})


def read_squad_examples(zhidao_input_file, search_input_file, is_training=True):
    """Read the search and zhidao preprocessed files and return example dicts.

    Args:
        zhidao_input_file: path to the zhidao JSON-lines file.
        search_input_file: path to the search JSON-lines file.
        is_training: when True (default), examples are collected; records are
            still parsed and filtered either way.

    Returns:
        A list of example dicts (search-file examples first, then zhidao),
        each with qas_id, question_text, question_type, doc_tokens,
        can_answer, start_position, end_position and answer keys.
    """
    examples = []
    _collect_examples(search_input_file, examples, is_training)
    _collect_examples(zhidao_input_file, examples, is_training)
    print("len(examples):", len(examples))
    return examples


def convert_examples_to_features(examples, tokenizer, max_seq_length, max_query_length,
                                 max_ans_length, output_file="../../data/train.data"):
    """Convert examples into BERT input features and dump them as JSON lines.

    Tokenization is character-level: question and document strings are split
    into individual characters and laid out as
    ``[CLS] question [SEP] document [SEP]`` with segment ids 0 for the
    question part and 1 for the document part. The answer becomes a decoder
    target wrapped in ``[GO] ... [EOS]``.

    Args:
        examples: example dicts as produced by read_squad_examples.
        tokenizer: object with a ``convert_tokens_to_ids(tokens)`` method
            (e.g. a BertTokenizer).
        max_seq_length: maximum length of the full input sequence; longer
            inputs are truncated and terminated with [SEP].
        max_query_length: maximum number of question characters kept.
        max_ans_length: maximum answer length including [GO]/[EOS].
        output_file: path the features are written to, one JSON object per
            line (defaults to the original hard-coded "../../data/train.data",
            so existing callers are unaffected; pass e.g. a dev.data path to
            build validation data without editing this function).

    Returns:
        The list of feature dicts that were written.
    """
    features = []

    for example in examples:
        query_tokens = list(example['question_text'])

        # Normalize curly quotes so they match the vocabulary.
        doc_tokens = example['doc_tokens'].replace(u"“", u"\"").replace(u"”", u"\"")
        start_position = example['start_position']
        end_position = example['end_position']
        can_answer = example['can_answer']

        # Decoder target: answer characters wrapped in [GO] ... [EOS].
        answer = example['answer'].replace(u"“", u"\"").replace(u"”", u"\"")
        answer_tokens = ["[GO]"] + list(answer) + ["[EOS]"]
        if len(answer_tokens) > max_ans_length:
            # Truncate but keep the closing [EOS] marker.
            answer_tokens = answer_tokens[:max_ans_length - 1] + ["[EOS]"]

        answer_ids = tokenizer.convert_tokens_to_ids(answer_tokens)
        answer_mask = [1] * len(answer_ids)

        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[:max_query_length]

        tokens_q = list(query_tokens)
        # [CLS] question [SEP] -> segment 0; document [SEP] -> segment 1.
        tokens = ["[CLS]"] + tokens_q + ["[SEP]"] + list(doc_tokens) + ["[SEP]"]
        segment_ids = [0] * (len(tokens_q) + 2) + [1] * (len(doc_tokens) + 1)

        # Drop examples whose answer span would fall outside the kept window.
        if end_position >= max_seq_length:
            continue

        if len(tokens) > max_seq_length:
            # Truncate and force the last kept token to be [SEP].
            tokens[max_seq_length - 1] = "[SEP]"
            input_ids = tokenizer.convert_tokens_to_ids(tokens[:max_seq_length])
            segment_ids = segment_ids[:max_seq_length]
        else:
            input_ids = tokenizer.convert_tokens_to_ids(tokens)

        input_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)

        input_mask = [1] * len(input_ids)
        input_mask_q = [1] * len(input_ids_q)
        assert len(input_ids) == len(segment_ids)
        assert len(input_ids_q) == len(input_mask_q)

        features.append(
            {"input_ids": input_ids,
             "input_ids_q": input_ids_q,
             "input_mask": input_mask,
             "input_mask_q": input_mask_q,
             "segment_ids": segment_ids,
             "can_answer": can_answer,
             "start_position": start_position,
             "end_position": end_position,
             "answer_ids": answer_ids,
             "answer_mask": answer_mask})

    print("len(features):", len(features))
    with open(output_file, 'w', encoding="utf-8") as fout:
        for feature in features:
            fout.write(json.dumps(feature, ensure_ascii=False) + '\n')

    return features


if __name__ == "__main__":
    # Preprocessed DuReader training files.
    zhidao_input_file = '/Users/qianqian/Documents/nlp/mrc/pgnet/data/extracted/trainset/zhidao.train.json'
    search_input_file = '/Users/qianqian/Documents/nlp/mrc/pgnet/data/extracted/trainset/search.train.json'

    # Sequence-length limits for the BERT inputs and the answer decoder.
    max_seq_length = 512
    max_ans_length = 512
    max_query_length = 60

    tokenizer = BertTokenizer.from_pretrained('../../model_parameter/roberta_wwm_ext_origin', do_lower_case=True)
    # print(len(tokenizer.vocab))     # 21128 entries in this vocabulary

    # Build the training data file (train.data).
    examples = read_squad_examples(zhidao_input_file=zhidao_input_file,
                                   search_input_file=search_input_file)
    features = convert_examples_to_features(examples=examples,
                                            tokenizer=tokenizer,
                                            max_seq_length=max_seq_length,
                                            max_query_length=max_query_length,
                                            max_ans_length=max_ans_length)

    # To build the validation data (dev.data) instead: comment out the training
    # block above, point the reader at the dev input files, and change the
    # output name from train.data to dev.data.
    # examples = read_squad_examples(zhidao_input_file=args.dev_zhidao_input_file,
    #                                search_input_file=args.dev_search_input_file)
    # features = convert_examples_to_features(examples=examples, tokenizer=tokenizer,
    #                                         max_seq_length=args.max_seq_length, max_query_length=args.max_query_length,
    #                                         max_ans_length=args.max_ans_length)