import os
import jieba
import pandas as pd
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei']

# Load the stopword list.
# A set (not a list) makes the per-token membership test in cut_words O(1);
# with a list every token pays an O(n) scan over the whole stopword file.
with open('thesaurus/stop_word.txt', 'r', encoding='utf-8') as f:
    stopwords = {line.strip() for line in f}

# Load the custom user dictionary so domain terms are kept as single tokens.
jieba.load_userdict('thesaurus/user_word.txt')

# Concatenate every xlsx file under document/ into one DataFrame.
# NOTE(review): this df is overwritten inside the per-file loop below and is
# never read in between — kept only for compatibility; consider removing.
df = pd.concat([pd.read_excel('document/' + file_name)
                for file_name in os.listdir('document/')
                if file_name.endswith('.xlsx')])
# Cell sanitizer applied to the 'content' column before segmentation.
def remove_float(x):
    """Return '' for float cells (Excel NaN placeholders), otherwise x unchanged."""
    return '' if isinstance(x, float) else x

def cut_words(text):
    """Segment *text* with jieba, keeping tokens that are longer than one
    character and not in the module-level stopword list."""
    kept = []
    for token in jieba.cut(text):
        if len(token) > 1 and token not in stopwords:
            kept.append(token)
    return kept

# Topic strength = relative frequency of the topic word among all kept tokens.
def calculate_topic_strength(df, topic):
    """Return the fraction of tokens in df['content'] equal to *topic*.

    Side effect: normalizes df['content'] (floats/NaN -> '') and writes the
    per-row token lists into df['words'] — the word-cloud step later reads
    that column, so the mutation is intentional and must be kept.

    Returns 0.0 for an empty corpus (previously raised ZeroDivisionError).
    """
    # Normalize float cells to '' and segment each row, dropping stopwords.
    df['content'] = df['content'].apply(remove_float)
    df['words'] = df['content'].apply(cut_words)

    # Count topic occurrences directly; building a full Counter over the
    # corpus just to read one key was wasted work.
    topic_count = sum(words.count(topic) for words in df['words'])
    # Total number of kept tokens across all rows.
    total_count = sum(len(words) for words in df['words'])
    # Guard: an empty or all-stopword corpus has zero tokens.
    return topic_count / total_count if total_count else 0.0

# For every xlsx in document/: compute topic strengths and save a word cloud.
for i, filename in enumerate(os.listdir('document')):
    if not filename.endswith('.xlsx'):
        continue
    # Unique output name: original name without the '.xlsx' suffix, plus index.
    new_title = f'{filename[:-5]}_{i}'
    # BUG FIX: the path previously contained a literal placeholder string
    # instead of the actual file name, so read_excel failed on every file.
    df = pd.read_excel(f'document/{filename}')

    # Topic strengths (each call also refreshes df['words'] as a side effect).
    economic_strength = calculate_topic_strength(df, '经济')
    livelihood_strength = calculate_topic_strength(df, '民生')
    education_strength = calculate_topic_strength(df, '教育')
    society_strength = calculate_topic_strength(df, '社会')
    military_strength = calculate_topic_strength(df, '军事')
    culture_strength = calculate_topic_strength(df, '文化')
    technology_strength = calculate_topic_strength(df, '科技')
    politics_strength = calculate_topic_strength(df, '政治')

    # WordCloud.generate wants one big string: join each row's token list
    # with spaces, then join all rows.
    words_list = []
    for words in df['words']:
        if isinstance(words, list):
            words_list.append(' '.join(words))
        else:
            words_list.append(words)
    df['words'] = words_list
    text = ' '.join(df['words'])

    wordcloud = WordCloud(font_path="./simhei.ttf", background_color='white',
                          width=800, height=600).generate(text)
    # Fresh figure per file; without this, images and state accumulate on one
    # shared figure across iterations and memory grows with each file.
    plt.figure()
    plt.title(f'词云图 - {new_title}')
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.savefig(f'wordcloud/-{new_title}.png')
    plt.close()


