import jieba.posseg
from collections import Counter

def cut_para(content):
    """Split *content* into sentences on sentence-ending punctuation.

    Both Chinese and English terminators are recognized. All listed
    punctuation (terminators and secondary marks such as commas, brackets
    and spaces) is dropped from the returned sentences. A run of
    consecutive terminators ("……", "?!") ends a single sentence rather
    than several.

    Fix over the original: empty fragments are no longer appended (the
    original could emit '' when the text started with a terminator or
    ended in stripped punctuation).

    Args:
        content: raw text to split.

    Returns:
        list[str]: non-empty sentences with punctuation stripped.
    """
    # Sentence terminators (Chinese and English).
    end_flags = {'?', '!', '？', '！', '。', '…', '~'}
    # Punctuation stripped from output but not treated as a terminator.
    # (Original listed '；' twice; a set makes duplicates impossible.)
    skip_flags = {'，', ',', '·', '、', '(', ')', '（', '）', '《', '》', ' ', '；'}

    sentences = []
    buf = ''
    last_idx = len(content) - 1
    for idx, char in enumerate(content):
        # Accumulate only non-punctuation characters.
        if char not in end_flags and char not in skip_flags:
            buf += char

        # End of text: flush whatever remains.
        if idx == last_idx:
            if buf:
                sentences.append(buf)
            break

        # Split on a terminator unless the next char is also a terminator
        # (wait for the whole run, e.g. "……", before splitting).
        if char in end_flags and content[idx + 1] not in end_flags:
            if buf:
                sentences.append(buf)
            buf = ''

    return sentences


def getCut(query):
    """Dispatch *query* to word splitting or jieba segmentation.

    If the query already looks like space-separated words — i.e. after
    trimming leading/trailing spaces it still contains an internal
    space between non-space text — split it on spaces. Otherwise run
    POS-based jieba segmentation.
    """
    # Equivalent to the original char-scan: a non-space char, followed
    # later by a space, followed later by another non-space char.
    core = query.strip(' ')
    if core and ' ' in core:
        # Already whitespace-delimited words.
        return getcut_words(query)
    # Single phrase/sentence: segment it.
    return getcut_sentence(query)

def getcut_words(query):
    """Split *query* on single spaces.

    Note: consecutive spaces produce empty tokens (plain ``split(' ')``
    semantics, not whitespace-run splitting).
    """
    tokens = query.split(' ')
    return tokens

def getcut_sentence(query):
    """Segment *query* with jieba POS tagging, keeping content words.

    A token is kept when its POS flag contains any of 'n', 'i', 'm',
    'l', 'j', 'b' (nouns, idioms, numerals, temporary/abbreviated/
    distinguishing words), or contains 'v' (verb-like) with the exact
    flag 'uz' excluded.

    Fix over the original: the stray debug ``print`` of the result has
    been removed — a library helper should not write to stdout.

    Returns:
        list[str]: the kept words, in segmentation order.
    """
    kept = []
    for pair in jieba.posseg.cut(query):
        flag = pair.flag
        # NOTE(review): 'v' never occurs in the flag 'uz', so the
        # `!= 'uz'` guard is dead code; the author may have meant 'uv'
        # (structural particle 得) — confirm before changing behavior.
        verb_like = 'v' in flag and flag != 'uz'
        if verb_like or any(tag in flag for tag in ('n', 'i', 'm', 'l', 'j', 'b')):
            kept.append(pair.word)
    return kept

def textToSentenceTokenMat(docStr: str):
    """Convert a document string into a sentence/token matrix.

    The document is first split into sentences with :func:`cut_para`,
    then each sentence is tokenized with ``jieba.lcut``.

    Returns:
        A list with one token list per sentence.
    """
    return [jieba.lcut(sentence) for sentence in cut_para(docStr)]

def fast_precision(text1, text2):
    """Character-overlap precision of *text2* against *text1*.

    Overlap is the multiset (Counter) intersection of the two texts'
    characters, so a repeated character counts up to its minimum
    multiplicity in either text.

    Fix over the original: removed the unused ``len_seq1`` local.

    Returns:
        overlap / len(text2) as a float, or int 0 when there is no
        overlap (which also covers an empty *text2*, avoiding a
        ZeroDivisionError).
    """
    common = Counter(text1) & Counter(text2)
    overlap = sum(common.values())
    if overlap == 0:
        # No shared characters (or empty text2) — precision is zero.
        return 0
    return overlap / len(text2)

def tokenListToStr(tokenList: list):
    """Concatenate a list of string tokens back into a single string."""
    joined = str.join('', tokenList)
    return joined

def calcQueryMatchRatio(query: str, orgAnswer_sentenceTokenMat: list):
    """Match ratio of *query* against every sentence of an answer.

    Each sentence (given as a token list) is joined back into a string
    and scored with :func:`fast_precision`. Empty token lists are
    skipped entirely (they contribute no entry to the result).

    Returns:
        list: one precision score per non-empty sentence.
    """
    return [
        fast_precision(query, tokenListToStr(tokens))
        for tokens in orgAnswer_sentenceTokenMat
        if tokens
    ]
