import json
import textUtil

from collections import Counter
def fast_precision(text1: str, text2: str) -> float:
    """Character-level precision of ``text2`` against ``text1``.

    Counts the multiset intersection of characters between the two strings
    and divides by the length of ``text2``.

    Args:
        text1: Reference string (e.g. the query).
        text2: Candidate string (e.g. one sentence of an answer).

    Returns:
        A float in [0, 1]; 0 when the strings share no characters
        (this also covers an empty ``text2``, avoiding division by zero,
        since the common-character count is then 0).
    """
    common_char = Counter(text1) & Counter(text2)
    len_common = sum(common_char.values())
    # Guard must stay: when text2 is empty, len_common is 0 and we return
    # before the division below could raise ZeroDivisionError.
    if len_common == 0:
        return 0
    return len_common / len(text2)

def tokenListToStr(tokenList: list) -> str:
    """Concatenate a list of token strings into a single string (no separator)."""
    joined = ''.join(token for token in tokenList)
    return joined

def calcQueryMatchRatio(query: str, orgAnswer_sentenceTokenMat: list) -> list:
    """Compute the match ratio of each answer sentence against the query.

    Args:
        query: The query as a plain string (tokens already concatenated).
        orgAnswer_sentenceTokenMat: A list of sentences, each a list of tokens.

    Returns:
        A list of floats — the character-precision of each non-empty
        sentence with respect to the query. Empty sentences are skipped,
        so the result may be shorter than the input list.
    """
    allRatio = []
    for sentence in orgAnswer_sentenceTokenMat:
        # Skip empty sentences: they carry no signal and would always score 0.
        if len(sentence) == 0:
            continue
        sentence = tokenListToStr(sentence)
        allRatio.append(fast_precision(query, sentence))
    # Removed leftover debug print(allRatio) that polluted stdout on every call.
    return allRatio  # All sentence match ratios for this answer.

def read_json_files(file_path) -> list:
    """Read a JSONL file and compute query/answer-sentence match ratios.

    Each line must be a JSON object with at least the keys ``query`` and
    ``org_answer``. Samples whose answer is the literal string
    ``'NoAnswer'`` are skipped.

    Args:
        file_path: Path to the JSONL file (one JSON object per line).

    Returns:
        A flat list of match ratios accumulated over all processed samples.
        (Previously the list was built but never returned — the computed
        result was silently discarded.)
    """
    with open(file_path, "r", encoding='utf-8') as f:
        allRatio = []
        for linenum, line in enumerate(f):
            sample = json.loads(line.strip())

            orgAnswer = sample['org_answer']
            if orgAnswer == 'NoAnswer':
                continue
            orgAnswer_sentenceTokenMat = textUtil.textToSentenceTokenMat(orgAnswer)

            query = sample['query']
            # If query is a sentence, getCut segments it into tokens; if it is
            # already space-separated words, it just splits — either way a list.
            query = textUtil.getCut(query)
            query = tokenListToStr(query)
            query = query.replace(' ', '')

            allRatio += calcQueryMatchRatio(query, orgAnswer_sentenceTokenMat)
    return allRatio

# Quick manual test: only runs when the file is executed directly, so
# importing this module no longer triggers file I/O as a side effect.
if __name__ == "__main__":
    read_json_files('C:\\Users\\Bruce Wayne\\Downloads\\data_task1\\train_data\\train.json')