import json
import formatOutput
from gensim.summarization import bm25
import textUtil
import keras
import Xdata
import numpy as np
import keras.backend as K

# Amount added to `value` after each driver-loop pass (see bottom of file).
step = 1
# Loop-control threshold; the while loop below runs while value <= 0.01740.
value = 0.01740
# Index into `file_name` selecting the output file for the current pass.
mark = 0
# Output file names prd1.txt .. prd100.txt, one per potential pass.
file_name = [f'prd{i}.txt' for i in range(1, 101)]
def inferGetSentence(sentenceTokenMat: list, query: list, XdataObj):
    """Select the sentences of a document most relevant to *query*.

    Every tokenized sentence in ``sentenceTokenMat`` is scored against
    ``query`` with BM25 and ranked best-first.  The ranking is then walked
    in order: each score is fed into ``XdataObj`` and the module-global
    ``model`` predicts, from the accumulated scores, whether to stop.
    Sentences seen before the model's prediction reaches 0.5 are kept.

    :param sentenceTokenMat: 2-D list — one inner list of tokens per sentence.
    :param query: list of query tokens.
    :param XdataObj: per-document score accumulator; ``add`` takes a score and
        ``getData()`` yields the model's input feature vector.
    :return: 2-D token/sentence list of the selected sentences.
    """
    bm25Model = bm25.BM25(sentenceTokenMat)
    # NOTE: this gensim get_scores() signature takes only the query; the old
    # average_idf computation was dead code and has been removed.
    allScore = bm25Model.get_scores(query)
    # Rank sentences by BM25 score, best first (stable sort, same order as
    # list.sort(reverse=True) on the score).
    sentenceScorePair = sorted(zip(sentenceTokenMat, allScore),
                               key=lambda pair: pair[1], reverse=True)
    retSentences = []
    for sentence, score in sentenceScorePair:
        XdataObj.add(score)
        # NOTE(review): assumes model.predict returns shape (1, 1) so pre[0]
        # is a length-1 array whose truthiness equals the scalar test — confirm.
        pre = model.predict(np.array([XdataObj.getData()]))
        if pre[0] >= 0.5:
            break
        retSentences.append(sentence)
    return retSentences

def getRecall(y_true, y_pred):
    """Keras-backend recall metric: TP / (TP + FN), epsilon-stabilized."""
    # Clip+round pushes labels/predictions into {0, 1} before counting.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    false_negatives = possible_positives - true_positives
    # Epsilon keeps the division finite when there are no positive labels.
    return true_positives / (true_positives + false_negatives + K.epsilon())

def read_json_files(file_path):
    """Run sentence-selection inference over a JSON-lines file.

    Each line of *file_path* is a JSON object with at least ``query`` and
    ``doc_text``.  For every sample the document is split into tokenized
    sentences, the most relevant ones are selected via
    :func:`inferGetSentence`, and all formatted results are written out
    under ``file_name[mark]`` (``mark`` is advanced by the driver loop at
    the bottom of the file).

    :param file_path: path to a UTF-8 JSON-lines input file.
    """
    with open(file_path, "r", encoding='utf-8') as f:
        predY = []
        for line in f:  # line number was never used; plain iteration suffices
            sample = json.loads(line.strip())
            # Normalize the query into a token list: getCut segments a
            # sentence, or splits already-tokenized input.
            query = textUtil.getCut(sample['query'])
            docStr = sample['doc_text']
            # Fresh score accumulator for each sample.
            XdataObj = Xdata.Xdata()
            sentenceTokenMat = textUtil.textToSentenceTokenMat(docStr)
            retSentences = inferGetSentence(sentenceTokenMat, query, XdataObj)
            # Save this sample's formatted inference result.
            predY.append(formatOutput.formatY(retSentences))
        formatOutput.formatYList(predY, file_name[mark])

# Test / driver section.
# Load the trained stopping model; getRecall must be passed as a custom
# object because it was registered as a metric when the model was saved.
model = keras.models.load_model('model.h5',custom_objects={'getRecall': getRecall})

# NOTE(review): with step = 1 and value starting at 0.01740 this loop runs
# exactly once (value becomes 1.01740 after the first pass) — confirm a
# single pass over the training file is intended.
while value  <=0.01740:
    read_json_files('C:\\Users\\Bruce Wayne\\Downloads\\data_task1\\train_data\\train.json')
    value += step
    mark += 1
# read_json_files('M:\\AI\\baiduNLP\\train_data\\train.json')