# -*-coding:utf-8 -*-

'''
@File       : word_discovery.py
@Author     : HW Shen
@Date       : 2020/7/20
@Desc       : 引入状态转移概率
'''


import kenlm

# Pre-trained KenLM binary language model (presumably trained on WeChat /
# "weixin" text — TODO confirm). Loaded once at import time; the .klm file
# must exist in the current working directory or this module fails to import.
model = kenlm.Model('weixin.klm')
from math import log10  # NOTE(review): import placed after the model load in the original

# Hand-tuned tag-transition weights; overall they penalise long words.
# The values were found empirically: they decrease step by step, and only
# the order of magnitude really matters.
# Tags: 'b' = word start, 'c'/'d'/'e' = 2nd/3rd/4th character of a word.
trans = {'bb': 1, 'bc': 0.15, 'cb': 1, 'cd': 0.01, 'db': 1, 'de': 0.01, 'eb': 1, 'ee': 0.001}
# Convert to log10 space so transition weights can simply be added to the
# language-model log scores.
# Fixed: dict.iteritems() is Python 2 only (AttributeError on Python 3).
trans = {i: log10(j) for i, j in trans.items()}


def viterbi(nodes):
    """Find the highest-scoring tag sequence with the Viterbi algorithm.

    Args:
        nodes: list of {tag: log-score} dicts, one per input position.
            Tags are 'b' (word start) and 'c'/'d'/'e' (2nd/3rd/4th char).

    Returns:
        The best tag string (one tag character per input position),
        scored as emission + transition sums over the module-level
        ``trans`` table; transitions absent from ``trans`` are forbidden.
    """
    # paths maps a partial tag sequence -> its accumulated log score.
    paths = nodes[0]
    for t in range(1, len(nodes)):
        prev_paths = paths
        paths = {}
        for tag, emit in nodes[t].items():
            # Score every allowed extension of the surviving paths.
            candidates = {
                path + tag: score + emit + trans[path[-1] + tag]
                for path, score in prev_paths.items()
                if path[-1] + tag in trans
            }
            # Viterbi pruning: keep only the best path ending in this tag.
            # (Guard added: the original crashed with ValueError when no
            # transition was possible; now such tags are simply dropped.)
            # Fixed: dict.values().index(...) / dict.keys()[k] are Python 2
            # only — dict views are not indexable on Python 3.
            if candidates:
                best = max(candidates, key=candidates.get)
                paths[best] = candidates[best]
    return max(paths, key=paths.get)


def cp(s):
    """Conditional log10 score of the last character of *s* given the
    preceding ones, computed as the difference of two KenLM scores.
    Returns -100.0 when the difference is exactly 0 (e.g. empty input),
    which effectively forbids that candidate.
    """
    full = model.score(' '.join(s), bos=False, eos=False)
    prefix = model.score(' '.join(s[:-1]), bos=False, eos=False)
    delta = full - prefix
    return delta or -100.0


def mycut(s):
    """Segment string *s* into words via LM scoring + Viterbi decoding.

    Each position i gets four candidate tags scored by ``cp``:
    'b' (s[i] starts a word), 'c'/'d'/'e' (s[i] is the 2nd/3rd/4th
    character of a word) — so produced words are at most 4 chars long.

    Args:
        s: text to segment.

    Returns:
        List of word strings whose concatenation equals *s*
        (empty list for empty input).
    """
    if not s:
        # Guard added: the original indexed s[0] and raised IndexError on ''.
        return []
    # NOTE(review): for i < 3 the slice bounds go negative (e.g. s[i-2:i+1])
    # and typically yield '' -> cp() returns -100.0, which in effect forbids
    # impossible tags near the start of the string. Kept as-is deliberately.
    nodes = [{'b': cp(s[i]),
              'c': cp(s[i - 1:i + 1]),
              'd': cp(s[i - 2:i + 1]),
              'e': cp(s[i - 3:i + 1])} for i in range(len(s))]
    tags = viterbi(nodes)
    words = [s[0]]
    for ch, tag in zip(s[1:], tags[1:]):
        if tag == 'b':          # 'b' starts a new word
            words.append(ch)
        else:                   # any other tag extends the current word
            words[-1] += ch
    return words


if __name__ == '__main__':
    # Placeholder entry point — no demo wired up yet; call mycut(text)
    # here to segment a string.
    pass
