from tensorflow.keras.models import load_model


from bert4keras.tokenizers import Tokenizer,load_vocab

# Imported for its side effect: registers bert4keras custom layers so that
# load_model can resolve them via custom_objects.
from bert4keras.layers import *

from utils.dataloader import QADataLoader
from utils.bert_info import BertInfo
from utils.generator import ReadingComprehension

from tqdm import tqdm

# Maximum token lengths for passages, questions and generated answers.
max_p_len = 256
max_q_len = 64
max_a_len = 32
# Path of the trained Keras model saved by the training script.
model_save_path = './best_model'
bert_info_object = BertInfo()
# Load a simplified vocabulary: unused tokens are dropped to shrink the
# embedding matrix; `keep_tokens` maps the reduced token ids back to the
# original BERT vocabulary ids. The listed special tokens are always kept.
token_dict,keep_tokens = load_vocab(
    dict_path=bert_info_object.dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]']
)
tokenizer = Tokenizer(token_dict,do_lower_case=True)



# Restore the trained QA model (bert4keras layers were imported above so the
# custom objects can be resolved during deserialization).
qa_model = load_model(model_save_path)

qa_data = QADataLoader()
# NOTE(review): evaluation runs on the first 100 *training* samples, not a
# held-out test set — confirm this is intentional.
test_data = qa_data.get_train_data()[:100]


# Decoder wrapper (utils.generator) that produces an answer for a question
# plus candidate passages using the loaded model and tokenizer.
reader = ReadingComprehension(
    start_id=None,
    end_id=tokenizer._token_end_id,
    max_a_len= max_a_len,
    max_q_len = max_q_len,
    max_p_len = max_p_len,
    generator_model=qa_model,
    tokenizer=tokenizer
)


def predict_to_file(data, filename, topk=1, model_reader=None):
    """Write predictions to *filename* as tab-separated ``id\\tanswer`` lines.

    Args:
        data: sized iterable of samples; each sample is a dict with keys
            ``'id'``, ``'question'`` and ``'passages'`` (each passage a dict
            holding the text under key ``'passage'``).
        filename: output file path (UTF-8 text).
        topk: number of candidates the reader considers when answering.
        model_reader: object exposing ``answer(question, passages, topk)``;
            defaults to the module-level ``reader``. Injectable for testing.

    A sample with no answer still gets a line with an empty answer field,
    so the output always has one line per input sample.
    """
    if model_reader is None:
        model_reader = reader
    with open(filename, 'w', encoding='utf-8') as f:
        # tqdm consumes the iterable directly; no need to wrap it in iter().
        for d in tqdm(data, desc='正在预测(共%s条样本)' % len(data)):
            q_text = d['question']
            p_texts = [p['passage'] for p in d['passages']]
            a = model_reader.answer(q_text, p_texts, topk)
            if a:
                s = '%s\t%s\n' % (d['id'], a)
            else:
                s = '%s\t\n' % (d['id'])
            f.write(s)
            # Flush per line so partial results survive an interrupted run.
            f.flush()

predict_to_file(test_data,'./sumit')