# Keyword extraction only — Python script
import re
import jieba.posseg as pseg
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

matplotlib.use('TkAgg')


# Read a CSV file and extract the text content of one column
def extract_text_from_csv(csv_path, text_column_name, max_rows=3000):
    """Read at most ``max_rows`` rows from a CSV file and return the
    non-null values of column ``text_column_name`` as a list.

    :param csv_path: path to the CSV file
    :param text_column_name: name of the column holding the texts
    :param max_rows: cap on the number of data rows read (default 3000)
    :return: list of the column's non-null cell values
    """
    frame = pd.read_csv(csv_path, nrows=max_rows)
    column = frame[text_column_name]
    # Drop NaN cells (empty CSV fields) before converting to a plain list.
    return column.dropna().tolist()


# Generic resume/CV vocabulary that carries no topical signal.  Kept as a
# module-level frozenset for O(1) membership tests and built only once
# (the original list was rebuilt on every call and contained duplicates:
# '从业', '相关', '文字').
_STOPWORDS = frozenset({
    '熟悉', '使用', '操作', '学习', '软件', '进行', '数据', '掌握', '原理', '通过', '办公',
    '从业', '大学', '以及', '擅长', '善于', '能够', '能力', '相关', '了解', '系统',
    '熟练掌握', '大学生', '机动车', '等级', '运用', '沟通', '获得', '技能', '会计', '法律',
    '基本', '基础', '具有', '水平', '国家', '全国', '英语', '可以', '具备', '工作', '知识',
    '专业知识', '文字', '实习生', '负责', '有限公司', '活动', '中南财经政法大学', '社会',
    '参与', '期间', '武汉', '暂无', 'xx省', 'xx市', '现在', '省略',
})

# POS tags dropped before modeling: adjectives (a/ad/an), adverbs (d),
# classifiers/quantifiers (q).
_DROPPED_FLAGS = frozenset({'a', 'ad', 'an', 'd', 'q'})


def prepare_text(text):
    """Segment a Chinese text with jieba and return a space-joined string
    of content words suitable for bag-of-words vectorization.

    Filtering (single pass): drop words with a part-of-speech tag in
    ``_DROPPED_FLAGS``, single-character words, and stopwords; finally keep
    only runs of CJK unified ideographs (U+4E00..U+9FA5), which removes
    latin letters, digits, and punctuation.

    :param text: raw text to segment
    :return: space-separated keywords (may be empty)
    """
    words = [
        word
        for word, flag in pseg.cut(text, use_paddle=True)
        if flag not in _DROPPED_FLAGS
        and len(word) > 1
        and word not in _STOPWORDS
    ]
    content = ' '.join(words)
    # Raw string for the regex pattern (the original used a plain literal).
    return ' '.join(re.findall(r'[\u4e00-\u9fa5]+', content))


def plot_top_words(model, feature_names, n_top_words, title):
    """Plot, one horizontal bar chart per topic, the ``n_top_words``
    highest-weight terms of a fitted topic model.

    :param model: fitted decomposition model exposing ``components_``
        (one row of term weights per topic, e.g. LDA or NMF)
    :param feature_names: array of vocabulary terms indexable by position
        (as returned by ``get_feature_names_out()``)
    :param n_top_words: number of top terms to show per topic
    :param title: figure-level title

    Fix vs. the original: the subplot grid was hard-coded to 3 columns,
    breaking for any other ``n_components``; now sized from the model.
    ``plt.rcParams`` and ``fig.suptitle`` were also re-applied on every
    loop iteration; they are set once here.
    """
    n_topics = len(model.components_)
    # Fonts that can render Chinese labels — set once, before drawing text.
    plt.rcParams["font.sans-serif"] = ['KaiTi', 'SimHei', 'FangSong']
    # squeeze=False guarantees a 2-D axes array even for a single topic.
    fig, axes = plt.subplots(1, n_topics, figsize=(10 * n_topics, 15),
                             sharex=True, squeeze=False)
    axes = axes.flatten()
    for topic_idx, topic in enumerate(model.components_):
        # argsort ascending -> last n indices are the heaviest terms.
        top_features_ind = topic.argsort()[-n_top_words:]
        top_features = feature_names[top_features_ind]
        weights = topic[top_features_ind]

        ax = axes[topic_idx]
        ax.barh(top_features, weights, height=0.7)
        ax.set_title(f"Topic {topic_idx + 1}", fontdict={"fontsize": 30})
        ax.tick_params(axis="both", which="major", labelsize=20)
        for side in ("top", "right", "left"):
            ax.spines[side].set_visible(False)

    fig.suptitle(title, fontsize=40)
    plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
    plt.show()


# Path to the CSV file
csv_path = 'Stu.csv'  # replace with your own CSV path
text_column_name = 'hjjfbzpqk'  # column: awards and published works

# Load the raw texts from the CSV.
texts = extract_text_from_csv(csv_path, text_column_name, max_rows=5000)
# Coerce each cell to str first: pandas can yield non-string values
# (e.g. numbers), which jieba cannot segment.
prepared_texts = [prepare_text(str(text)) for text in texts]


# TFIDF-LDA pipeline: TF-IDF weighting followed by a 3-topic LDA.
tfidf_vectorizer = TfidfVectorizer()
tfidf = tfidf_vectorizer.fit_transform(prepared_texts)
# random_state pins the stochastic variational inference so that the
# extracted topics are reproducible across runs.
lda = LatentDirichletAllocation(n_components=3, random_state=0).fit(tfidf)
tfidf_feature_names = tfidf_vectorizer.get_feature_names_out()
plot_top_words(lda, tfidf_feature_names, 20, "TFIDF-LDA")
