import json
from gensim.summarization import bm25
import textUtil
import numpy as np

# Global dataset accumulators filled in by genData / genDataForNoAnswer and
# pickled at the bottom of the script:
#   allX — fixed-length windows of BM25 scores (one numpy array per sample)
#   allY — stop/continue labels (1 = stop, 0 = continue), aligned with allX
allX = []
allY = []
class Xdata:
    """A fixed-size sliding window over the most recent BM25 scores.

    The window is a numpy vector of length 5; once full, each new score
    pushes the oldest one out. A snapshot of the window forms one feature
    vector in the global ``allX`` dataset.
    """

    def __init__(self):
        # Window capacity and backing vector; `sub` is the next free slot.
        self.length = 5
        self.x = np.zeros((self.length,))
        self.sub = 0

    def add(self, val: float):
        """Insert `val`; when full, shift left so the newest score is last."""
        if self.sub < self.length:
            # Still filling up: write into the next free slot.
            self.x[self.sub] = val
            self.sub += 1
        else:
            # Full window: drop the oldest score, append the newest at the tail.
            self.x[:-1] = self.x[1:]
            self.x[-1] = val

    def addToDataset(self):
        """Snapshot the current window into the global feature list `allX`."""
        print(self.x)
        allX.append(self.x.copy())

def calcBM25(sentenceTokenMat: list, query: list):
    """Score every tokenized sentence against `query` with BM25.

    Returns a list of (sentence_tokens, score) pairs sorted from highest
    to lowest BM25 score.
    """
    model = bm25.BM25(sentenceTokenMat)
    scores = model.get_scores(query)
    # Pair each sentence with its score, then rank by score descending.
    pairs = list(zip(sentenceTokenMat, scores))
    return sorted(pairs, key=lambda pair: pair[1], reverse=True)

def isStrInList(_str: str, _list: list, threshold: float):
    """Return True when `_str` fuzzy-matches any candidate in `_list`.

    A candidate matches when textUtil.fast_precision(candidate, _str)
    reaches `threshold`; the scan short-circuits on the first match.
    """
    return any(
        textUtil.fast_precision(candidate, _str) >= threshold
        for candidate in _list
    )

def genData(sentenceScorePair: list, orgAnswer_sentenceTokenMat: list):
    """Emit (score-window, label) training samples for one answered query.

    Walks the BM25-ranked sentences from best to worst; sentences scoring
    above the weakest reference-answer match are labelled "continue" (0),
    and the first sentence at or below that score is labelled "stop" (1).
    Appends to the global `allX` (via Xdata) and `allY`.

    Raises ValueError when no sentence ever matches the reference answer
    (previously this spun forever in the threshold-relaxation loop).
    """
    # Stringify the reference-answer sentences into a LOCAL list — the
    # original mutated the caller's `orgAnswer_sentenceTokenMat` in place.
    orgAnswerStrs = [
        textUtil.tokenListToStr(tokens) for tokens in orgAnswer_sentenceTokenMat
    ]
    # Stringify each candidate sentence ONCE, hoisted out of the relaxation
    # loop below (the original re-converted every sentence on every pass).
    sentenceStrs = [
        textUtil.tokenListToStr(sentence) for sentence, _ in sentenceScorePair
    ]

    # Collect BM25 scores of sentences that fuzzy-match the reference answer,
    # relaxing the match threshold in 0.1 steps until something qualifies.
    orgAnswerMatchThreshold = 0.9
    while True:
        orgAnswerBM25Score = [
            score
            for sentenceStr, (_, score) in zip(sentenceStrs, sentenceScorePair)
            if isStrInList(sentenceStr, orgAnswerStrs, orgAnswerMatchThreshold)
        ]
        if orgAnswerBM25Score:
            break
        orgAnswerMatchThreshold -= 0.1
        # fast_precision presumably yields values in [0, 1], so a threshold
        # this far below zero that still finds nothing means the candidate
        # list is empty (or matching is broken) — fail loudly instead of
        # looping forever.
        if orgAnswerMatchThreshold < -1.0:
            raise ValueError("no sentence matches the reference answer")

    # The weakest matching sentence's score marks the stopping point.
    threshold = min(orgAnswerBM25Score)
    nowData = Xdata()
    for _, score in sentenceScorePair:
        nowData.add(score)
        nowData.addToDataset()
        if score <= threshold:  # stop
            allY.append(1)
            break
        allY.append(0)  # continue
    allY[-1] = 1  # the last emitted label must always be "stop"

def genDataForNoAnswer(sentenceScorePair: list):
    """Emit one training sample for a query with no reference answer.

    The window holds only the top BM25 score, and the label is "stop" (1):
    with no answer present, retrieval should halt immediately.
    """
    topScore = sentenceScorePair[0][1]
    window = Xdata()
    window.add(topScore)
    window.addToDataset()
    allY.append(1)

def read_json_files(file_path):
    """Stream a JSONL file and convert every sample into training data.

    Each line holds one JSON object with keys 'query', 'doc_text' and
    'org_answer'; the sentinel value 'NoAnswer' marks samples without a
    reference answer, which get the single-window no-answer treatment.
    """
    with open(file_path, "r", encoding='utf-8') as f:
        # NOTE: the original enumerated lines but never used the index.
        for line in f:
            sample = json.loads(line.strip())

            # Normalize the query into a token list (getCut segments a
            # sentence, or splits an already space-separated word string).
            query = textUtil.getCut(sample['query'])

            # Tokenize the document into sentences and rank them by BM25.
            doc_sentenceTokenMat = textUtil.textToSentenceTokenMat(sample['doc_text'])
            sentenceScorePair = calcBM25(doc_sentenceTokenMat, query)

            orgAnswer = sample['org_answer']
            if orgAnswer == 'NoAnswer':
                genDataForNoAnswer(sentenceScorePair)
            else:
                orgAnswer_sentenceTokenMat = textUtil.textToSentenceTokenMat(orgAnswer)
                genData(sentenceScorePair, orgAnswer_sentenceTokenMat)

read_json_files('train.json')

import pickle

# Persist the accumulated features and labels. Context managers guarantee
# both pickle files are flushed and closed — the original opened two files
# and never closed either (the first handle was silently leaked when `f`
# was rebound).
with open('allX.pickle', 'wb') as f:
    pickle.dump(allX, f)
with open('allY.pickle', 'wb') as f:
    pickle.dump(allY, f)