import json
import formatOutput
from gensim.summarization import bm25
import textUtil

def calcBM25(sentenceTokenMat:list, query:list):
    """Score every document sentence against the query with BM25.

    Builds a BM25 model over ``sentenceTokenMat`` (a list of token lists,
    one per sentence), scores ``query`` (a token list) against each
    sentence, and returns ``(sentence_tokens, score)`` pairs sorted by
    score in descending order.
    """
    model = bm25.BM25(sentenceTokenMat)
    scores = model.get_scores(query)
    # Pair each sentence with its score, then rank best-first.
    return sorted(zip(sentenceTokenMat, scores),
                  key=lambda pair: pair[1],
                  reverse=True)

def isStrInList(_str:str, _list:list, threshold:float):
    """Return True if any string in ``_list`` fuzzily matches ``_str``.

    A candidate counts as a match when
    ``textUtil.fast_precision(candidate, _str) >= threshold``.
    """
    return any(
        textUtil.fast_precision(candidate, _str) >= threshold
        for candidate in _list
    )

def boundInfer(sentenceScorePair:list, orgAnswer_sentenceTokenMat:list):
    """Select document sentences whose BM25 score clears the answer bound.

    Args:
        sentenceScorePair: ``(sentence_tokens, score)`` pairs sorted by
            score descending (output of ``calcBM25``).
        orgAnswer_sentenceTokenMat: token lists of the original-answer
            sentences.

    Returns:
        The sentence token lists whose score is strictly greater than the
        lowest score among sentences that fuzzily match the original
        answer. Returns ``[]`` when no match can be established.
    """
    # Convert the original-answer sentences to strings for fuzzy matching.
    # Build a new list instead of overwriting the caller's argument in
    # place (the original code mutated the parameter as a side effect).
    orgAnswerStrs = [textUtil.tokenListToStr(tokens)
                     for tokens in orgAnswer_sentenceTokenMat]

    # Guard: with no candidates on either side no match can ever be found,
    # and the relaxation loop below would never terminate.
    if not orgAnswerStrs or not sentenceScorePair:
        return []

    # Collect BM25 scores of document sentences that match an
    # original-answer sentence, relaxing the match threshold in 0.1 steps
    # until at least one match appears. A floor prevents an infinite loop
    # in case fast_precision never clears even a negative threshold.
    orgAnswerMatchThreshold = 0.9
    while True:
        orgAnswerBM25Score = [
            score
            for sentence, score in sentenceScorePair
            if isStrInList(textUtil.tokenListToStr(sentence),
                           orgAnswerStrs, orgAnswerMatchThreshold)
        ]
        if orgAnswerBM25Score:
            break
        orgAnswerMatchThreshold -= 0.1
        if orgAnswerMatchThreshold < -1.0:
            return []

    # Keep sentences strictly above the lowest matched score. Pairs are
    # sorted descending, so we can stop at the first score <= threshold
    # (this deliberately excludes the lowest-scoring matched sentence
    # itself, preserving the original boundary semantics).
    threshold = min(orgAnswerBM25Score)
    retSentences = []
    for sentence, score in sentenceScorePair:
        if score <= threshold:
            break
        retSentences.append(sentence)
    return retSentences

def read_json_files(file_path):
    """Run BM25 bound inference over a JSON-lines file and save predictions.

    Each line of ``file_path`` must be a JSON object with keys
    ``'org_answer'``, ``'query'`` and ``'doc_text'``. For every sample the
    document sentences are BM25-scored against the query and the sentences
    covering the original answer are kept; a sample whose answer is the
    literal ``'NoAnswer'`` yields an empty prediction. All formatted
    predictions are written to ``'predY.txt'`` via
    ``formatOutput.formatYList``.
    """
    predY = []
    with open(file_path, "r", encoding='utf-8') as f:
        for line in f:  # one JSON sample per line
            sample = json.loads(line.strip())

            orgAnswer = sample['org_answer']
            if orgAnswer == 'NoAnswer':
                # No gold answer: emit an empty (formatted) prediction.
                retSentencesStr = formatOutput.formatY([])
            else:
                orgAnswer_sentenceTokenMat = textUtil.textToSentenceTokenMat(orgAnswer)

                # getCut tokenizes a sentence query, or splits a
                # word-list query into a list.
                query = textUtil.getCut(sample['query'])

                docStr = sample['doc_text']
                doc_sentenceTokenMat = textUtil.textToSentenceTokenMat(docStr)

                sentenceScorePair = calcBM25(doc_sentenceTokenMat, query)
                retSentences = boundInfer(sentenceScorePair, orgAnswer_sentenceTokenMat)
                retSentencesStr = formatOutput.formatY(retSentences)
            predY.append(retSentencesStr)  # record this sample's result

    formatOutput.formatYList(predY, 'predY.txt')

# Run the inference only when executed as a script, not on import.
if __name__ == '__main__':
    read_json_files('train.json')