from collections import Counter

import jieba


# Segment a document into words and report each word's relative frequency
# (useful for extracting the high-frequency words of an article).
def word_count1(in_path, out_path):
    """Segment *in_path* with jieba and write per-word relative frequencies.

    Each output line is ``<word>   <count / total_words>`` and the lines are
    ordered from most to least frequent. An empty input produces an empty
    output file.

    :param in_path: UTF-8 text file to segment.
    :param out_path: UTF-8 file to (over)write with the frequency table.
    """
    counts = Counter()
    # `with` guarantees the handle is closed even if jieba raises mid-file.
    with open(in_path, 'r', encoding='utf8') as f:
        for line in f:
            # cut_all=False: precise segmentation mode, no overlapping words.
            counts.update(jieba.cut(line.strip(), cut_all=False))
    total = sum(counts.values())
    with open(out_path, 'w', encoding='utf8') as f:
        # most_common() sorts by descending count, ties in insertion order —
        # same ordering as sorted(..., key=count, reverse=True).
        for word, count in counts.most_common():
            f.write(word + "   " + str(count / total) + "\n")


# Count whole-line repetitions and report each line's relative frequency
# (useful for short texts with many repeated lines, e.g. song lyrics).
def line_count1(in_path, out_path):
    """Count duplicate lines in *in_path* and write relative frequencies.

    Each output line is ``<line>   <count / total_lines>`` and the lines are
    ordered from most to least frequent. An empty input produces an empty
    output file.

    :param in_path: UTF-8 text file to read.
    :param out_path: UTF-8 file to (over)write with the frequency table.
    """
    counts = Counter()
    # `with` guarantees the handle is closed even on an exception mid-read.
    with open(in_path, 'r', encoding='utf8') as f:
        for line in f:
            counts[line.strip()] += 1
    total = sum(counts.values())
    with open(out_path, 'w', encoding='utf8') as f:
        # most_common() sorts by descending count, ties in insertion order —
        # same ordering as sorted(..., key=count, reverse=True).
        for text, count in counts.most_common():
            f.write(text + "   " + str(count / total) + "\n")


if __name__ == '__main__':
    # Demo run: build both frequency tables from the same sample text.
    source = 'data/txt/wc.txt'
    word_count1(source, 'data/txt/wc_count.txt')
    line_count1(source, 'data/txt/wc_count1.txt')
