import jieba
import jieba.analyse

def process_count(txt, counts):
    """Tokenize *txt* with jieba and tally word frequencies into *counts*.

    Single-character tokens are ignored; *counts* (word -> occurrence
    count) is updated in place and nothing is returned.
    """
    for token in jieba.lcut(txt):
        # Skip one-character tokens (mostly particles/punctuation).
        if len(token) > 1:
            counts[token] = counts.get(token, 0) + 1

def process_count_tostr(txt, counts):
    """Count words of *txt* into *counts* and render a frequency report.

    Args:
        txt: text to tokenize.
        counts: dict mapping word -> count; updated in place.

    Returns:
        (report, n): *report* has one "word count" line per distinct word,
        sorted by count descending; *n* is the number of distinct words.
    """
    process_count(txt, counts)

    # Convert the key/value pairs to a list sorted by occurrence count,
    # highest first.
    items = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)

    # str.join avoids the quadratic cost of repeated += concatenation.
    ret_str = "".join(
        "{0:<5}{1:>5}".format(word, count) + "\n" for word, count in items
    )
    return ret_str, len(items)

def process_weight(txt):
    """Return jieba TF-IDF (keyword, weight) pairs for *txt*.

    The pairs are ordered by weight, largest first.
    """
    pairs = jieba.analyse.extract_tags(txt, withWeight=True)
    # Order by the weight component, descending.
    return sorted(pairs, key=lambda pair: pair[1], reverse=True)

def process_weight_tostr(txt):
    """Render the TF-IDF keywords of *txt* as a "word weight" report.

    Args:
        txt: text to extract keywords from.

    Returns:
        (report, n): *report* has one formatted line per keyword, sorted
        by weight descending; *n* is the number of keywords.
    """
    items = process_weight(txt)

    # Build the report with join instead of quadratic += concatenation.
    ret_str = "".join(
        "{0:<5}{1:>5}".format(word, weight) + "\n" for word, weight in items
    )
    return ret_str, len(items)


if __name__ == '__main__':
    # Read the input note. `with` guarantees the handle is closed even if
    # read() raises — the original open()/close() pair leaked on error.
    with open(r"F:\workspace_autoit3\导出有道云笔记\2019.1\0101 返程.txt",
              "r", encoding='utf-8') as file:
        txt = file.read()

    # Word-frequency report.
    counts = {}
    ret_str, ret_count = process_count_tostr(txt, counts)
    print('%s\n%s\n' % (ret_str, ret_count))

    # TF-IDF keyword-weight report.
    ret_str, ret_count = process_weight_tostr(txt)
    print('%s\n%s\n' % (ret_str, ret_count))
