# -*- coding: utf-8 -*-

"""
统计词频
"""

import os
from collections import Counter

import jieba
import pandas as pd
from jieba import posseg as psg

def cut_content(content: str) -> list:
    """
    Segment text with jieba POS tagging and keep content-bearing words.

    Retains tokens whose part-of-speech tag is in the noun/verb/adjective
    families (plus pronouns, adverbs and a few others) and drops
    single-character tokens, which act as stop-word-like noise in Chinese.

    :param str content: Text to be segmented.
    :return: Retained words, in document order.
    :rtype: list[str]
    """
    # Build the accepted-tag set once: O(1) membership instead of scanning
    # a list for every token.
    kept_flags = frozenset(['n', 'nt', 'nz', 'nl', 'v', 'vd', 'vn', 'vf',
                            'vx', 'vi', 'vl', 'a', 'r', 'd', 'h', 'k'])
    # len(...) > 1 filters out single-character tokens.
    return [seg.word for seg in psg.cut(content)
            if seg.flag in kept_flags and len(seg.word) > 1]

def calculate_word_frequency(word_list: list):
    """
    Count occurrences and relative frequency of each word.

    :param list[str] word_list: Words to tally.
    :return: (occurrence count per word, relative frequency per word).
        Both dicts are empty for empty input.
    :rtype: tuple[dict, dict]
    """
    # Counter does the tallying in C instead of a manual dict loop.
    word_count = dict(Counter(word_list))
    n = len(word_list)
    # n == 0 implies word_count is empty, so the division never runs
    # on empty input (no ZeroDivisionError).
    word_frequency = {word: count / n for word, count in word_count.items()}
    return word_count, word_frequency

if __name__ == "__main__":

    # Load user-defined words so jieba segments domain terms as units.
    user_dict_path = "/home/ubuntu/code/git/subject-word-extraction/data/user_dict/"
    jieba.load_userdict(user_dict_path + 'user_dict.txt')

    # Load the document. Explicit encoding avoids locale-dependent decoding
    # of the UTF-8 Chinese report on other machines.
    with open("/home/ubuntu/code/git/subject-word-extraction/scripts/clean_data/000001_2023_平安银行_2023年年度报告_2024-03-15.txt",
              encoding="utf-8") as f:
        context = f.read()
    # context is already a str; the former "".join(context) was a no-op.
    word_list = cut_content(context)
    word_count, word_frequency = calculate_word_frequency(word_list)

    # Keywords of interest for the report.
    key_words = ['人工智能', '智能制造', '智慧制造', '主动制造','智能化转型','智能化','商业智能','图像理解',
                 '智能数据分析', '智能机器人', '制造执行系统', '智造', '机器学习', '深度学习', '一体化', '无人化',
                 '互联网技术', '工业互联网']
    # One count column and one frequency column per keyword.
    frequencies = dict()
    for key_word in key_words:
        frequencies[key_word] = []
        frequencies[key_word + '_词频'] = []
    # Accumulate report fragments and join once (linear, not quadratic).
    parts = ['\t']
    for key_word in key_words:
        if key_word in word_count:
            frequencies[key_word].append(word_count[key_word])
            frequencies[key_word + '_词频'].append(word_frequency[key_word])
            parts.append("\n%s:%d" % (key_word, word_count[key_word]))
            parts.append('\t')
            parts.append("%s:%f" % (key_word + '_词频', word_frequency[key_word]))
        else:
            # Keyword absent from the document: record explicit zeros.
            frequencies[key_word].append(0)
            frequencies[key_word + '_词频'].append(0.0)
            parts.append("\n%s:0\t%s:0.0" % (key_word, key_word + '_词频'))
    print("".join(parts))


