import json
import torch
import pickle
from tqdm import tqdm
import argparse
from tokenization import BertTokenizer
import os
from modeling import BertForQuestionAnswering, BertConfig

# Command-line configuration for the prediction script.
parser = argparse.ArgumentParser()
# parser.add_argument("--model_dir", default=None, type=str, required=True)
# parser.add_argument("--data_path", default=None, type=str, required=True,
#                     help="Directory holding the feature files (train.data, dev.data)")
parser.add_argument("--max_seq_length", default=512, type=int, )
parser.add_argument("--max_query_length", default=60, type=int, )
parser.add_argument("--predict_example_files", default="./out_put_dir/predict_file_zhidao.dev.json", type=str,
                    required=True)
parser.add_argument("--output_dir", default="./out_put_dir", type=str)

args = parser.parse_args()

# Extra runtime settings attached to args: first CUDA device, and the maximum
# number of passages scored per example (must match len(prior_scores) below).
args.device = torch.device("cuda", 0)
args.max_para_num = 5


def predict_data(question_text, doc_tokens, tokenizer, max_seq_length, max_query_length):
    """Build single-example BERT inputs for one (question, passage) pair.

    The question is split into characters, truncated to ``max_query_length``,
    and packed as ``[CLS] question [SEP] passage [SEP]`` with segment id 0 for
    the question half and 1 for the passage half.  If the packed sequence
    exceeds ``max_seq_length`` it is clipped and the last kept token is
    forced to be ``[SEP]``.

    Args:
        question_text: question string (tokenized per character).
        doc_tokens: iterable of passage tokens.
        tokenizer: object exposing ``convert_tokens_to_ids``.
        max_seq_length: hard cap on the packed sequence length.
        max_query_length: hard cap on the question length.

    Returns:
        ``(input_ids, input_mask, segment_ids)`` LongTensors, each with a
        leading batch dimension of size 1.
    """
    query_tokens = list(question_text)[:max_query_length]

    # [CLS] + question + [SEP], all in segment 0.
    tokens = ["[CLS]"] + query_tokens + ["[SEP]"]
    segment_ids = [0] * len(tokens)

    # Passage + trailing [SEP], all in segment 1.
    for tok in doc_tokens:
        tokens.append(tok)
        segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

    if len(tokens) > max_seq_length:
        # Clip to the maximum length, forcing the final kept token to [SEP].
        tokens[max_seq_length - 1] = "[SEP]"
        input_ids = tokenizer.convert_tokens_to_ids(tokens[:max_seq_length])
        segment_ids = segment_ids[:max_seq_length]
    else:
        input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # No padding is applied: batch size is 1, so the mask is all ones.
    input_mask = [1] * len(input_ids)

    assert len(input_ids) == len(segment_ids)

    return (torch.LongTensor(input_ids).unsqueeze(0),
            torch.LongTensor(input_mask).unsqueeze(0),
            torch.LongTensor(segment_ids).unsqueeze(0))


def find_best_answer_for_passage(start_probs, end_probs, passage_len, question):
    """Pick the highest-probability (start, end) span from per-token probs.

    Greedily takes the argmax start and end positions; if end < start, both
    offending positions are zeroed out and the argmax is retried, at most 4
    times.  If no ordered pair is found the boundaries are swapped so the
    returned span is always (lo, hi).

    Args:
        start_probs: 1-D tensor of per-token span-start probabilities.
        end_probs: 1-D tensor of per-token span-end probabilities.
        passage_len: unused; kept for interface compatibility.
        question: unused; kept for interface compatibility.

    Returns:
        ``((start, end), prob)`` — one-element tensors for the span boundaries
        and the product of the selected start/end probabilities.
    """
    # Clone before unsqueeze: the retry loop zeroes entries in place, and
    # unsqueeze() alone returns a view, which would silently mutate the
    # caller's probability tensors.
    start_probs = start_probs.clone().unsqueeze(0)
    end_probs = end_probs.clone().unsqueeze(0)

    prob_start, best_start = torch.max(start_probs, 1)
    prob_end, best_end = torch.max(end_probs, 1)
    num = 0
    while True:
        if num > 3:
            break
        if best_end >= best_start:
            break
        # Invalid span (end before start): suppress both picks and retry.
        start_probs[0][best_start], end_probs[0][best_end] = 0.0, 0.0
        prob_start, best_start = torch.max(start_probs, 1)
        prob_end, best_end = torch.max(end_probs, 1)
        num += 1
    max_prob = prob_start * prob_end

    if best_start <= best_end:
        return (best_start, best_end), max_prob
    else:
        return (best_end, best_start), max_prob


def find_best_answer(sample, start_probs, end_probs, prior_scores=(0.44, 0.23, 0.15, 0.09, 0.07)):
    """Select the best answer span across up to ``args.max_para_num`` passages.

    Each passage's best span probability (from ``find_best_answer_for_passage``)
    is weighted by a positional prior — earlier passages are trusted more —
    and the overall winner is used to slice the answer out of the
    reconstructed input text.

    Fix: the original rebuilt and sliced an ``answer`` string inside the loop
    on every iteration without ever using it; that dead computation is removed.

    Args:
        sample: example dict with 'question_text' and 'doc_tokens' (a list of
            dicts each holding a 'doc_tokens' passage string).
        start_probs: per-passage start-probability tensors, same order as
            ``sample['doc_tokens']``.
        end_probs: per-passage end-probability tensors.
        prior_scores: positional prior weight per passage rank.

    Returns:
        ``(best_answer, best_p_idx)``: the answer string ('' if none found)
        and the index of the passage it came from (None if none).
    """
    best_p_idx, best_span, best_score = None, None, 0

    for p_idx, passage in enumerate(sample['doc_tokens'][:args.max_para_num]):
        passage_len = min(args.max_seq_length, len(passage['doc_tokens']))
        answer_span, score = find_best_answer_for_passage(
            start_probs[p_idx], end_probs[p_idx], passage_len, sample['question_text'])

        # Weight by the passage-position prior.
        score *= prior_scores[p_idx]

        if score > best_score:
            best_score = score
            best_p_idx = p_idx
            best_span = answer_span

    if best_p_idx is None or best_span is None:
        best_answer = ''
    else:
        # Rebuild the model-input text ("p" + question + "。" + passage) so the
        # span indices line up with the predicted positions.
        # NOTE(review): the "p" prefix presumably mirrors training-time input
        # construction — confirm against the feature-building code.
        para = "p" + sample['question_text'] + "。" + sample['doc_tokens'][best_p_idx]['doc_tokens']
        best_answer = ''.join(para[best_span[0]: best_span[1] + 1])

    return best_answer, best_p_idx


def evaluate(model):
    """Run span prediction over pickled eval examples and dump results.

    For every example, each of its (up to ``args.max_para_num``) passages is
    scored independently by the model; the per-passage start/end probability
    tensors are then combined by ``find_best_answer``.  Predictions are
    written to ``<output_dir>/predicts.json`` and, when gold answers are
    present, references to ``<output_dir>/ref.json`` — one JSON object per
    line, non-ASCII preserved.

    Args:
        model: question-answering model; moved to ``args.device`` here and
            expected to return (start_prob, end_prob) per forward call.
    """
    device = args.device
    model = model.to(device)

    # Load the pre-generated examples file.
    # NOTE(review): pickle.load is only safe on trusted, locally produced files.
    with open(args.predict_example_files, 'rb') as f:
        eval_examples = pickle.load(f)

    with torch.no_grad():
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', do_lower_case=True)
        model.eval()
        pred_answers, ref_answers = [], []

        for step, example in enumerate(tqdm(eval_examples)):
            start_probs, end_probs = [], []
            question_text = example['question_text']

            # Score each of the example's passages (up to max_para_num) independently.
            for p_num, doc_tokens in enumerate(example['doc_tokens'][:args.max_para_num]):
                (input_ids, input_mask, segment_ids) = \
                    predict_data(question_text, doc_tokens['doc_tokens'], tokenizer, args.max_seq_length,
                                 args.max_query_length)
                # Move the single-example batch to the GPU.
                input_ids, input_mask, segment_ids = input_ids.to(device), input_mask.to(device), segment_ids.to(device)
                start_prob, end_prob = model(input_ids, segment_ids, attention_mask=input_mask)
                start_probs.append(start_prob.squeeze(0))
                end_probs.append(end_prob.squeeze(0))

            # Combine the per-passage results into a single best answer.
            best_answer, docs_index = find_best_answer(example, start_probs, end_probs)

            pred_answers.append({'question_id': example['id'],
                                 'question': example['question_text'],
                                 'question_type': example['question_type'],
                                 'answers': [best_answer],
                                 'entity_answers': [[]],
                                 'yesno_answers': []})
            if 'answers' in example:
                ref_answers.append({'question_id': example['id'],
                                    'question_type': example['question_type'],
                                    'answers': example['answers'],
                                    'entity_answers': [[]],
                                    'yesno_answers': []})
        with open(os.path.join(args.output_dir, 'predicts.json'), 'w', encoding='utf-8') as fout:
            for pred_answer in pred_answers:
                fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
        with open(os.path.join(args.output_dir, 'ref.json'), 'w', encoding='utf-8') as fout:
            for pred_answer in ref_answers:
                fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')


def eval_all():
    """Load the best checkpoint from ``args.output_dir`` and run evaluation.

    Builds the model from the on-disk BERT config, restores the saved state
    dict, and hands the model to ``evaluate`` (which moves it to the device).
    """
    model_path = os.path.join(args.output_dir, 'best_model.bin')
    config_path = './bert_config/bert_config.json'

    bert_config = BertConfig(config_path)
    qa_model = BertForQuestionAnswering(bert_config)
    qa_model.load_state_dict(torch.load(model_path))
    evaluate(qa_model)


# def main():
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--model_dir", default=None, type=str, required=True)
#     parser.add_argument("--data_path", default=None, type=str, required=True,
#                         help="存放feature文件的文件夹，包括train.data,dev.data")
#     parser.add_argument("--max_seq_length", default=512, type=int, )
#     parser.add_argument("--max_query_length", default=60, type=int, )
#
#     parser.add_argument("--output_dir", default="./out_put_dir", type=str, required=True)
#
#     args = parser.parse_args()
#
#     args.device = torch.device("cuda", 0)
#     args.max_para_num = 5
#
#     eval_all()


# Script entry point: run the full load-and-predict pipeline.
if __name__ == '__main__':
    eval_all()
