import pymysql
import pandas as pd
import jieba
from collections import Counter

# 读取停用词表
# 读取停用词表
def load_stopwords(filepath="stopwordlist.txt"):
    """Load a newline-delimited stopword file into a set.

    Args:
        filepath: Path to the stopword list, one word per line.

    Returns:
        set[str]: Stopwords with surrounding whitespace stripped and blank
        lines excluded. Empty set if the file does not exist (a warning is
        printed in that case and processing continues without filtering).
    """
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            # Drop blank lines so "" never sneaks into the stopword set.
            stopwords = {line.strip() for line in f if line.strip()}
    except FileNotFoundError:
        # Bug fix: the old message named "stopwords.txt", which is not the
        # default path — report the path that was actually tried.
        print(f"⚠️ 未找到 {filepath}，将不会进行停用词过滤。")
        stopwords = set()
    return stopwords


# 从数据库加载 content 数据
# 从数据库加载 content 数据
def load_data_from_db():
    """Fetch non-null comment texts from the ``mi_9pro`` table.

    Returns:
        pd.Series: The ``content`` column with any remaining nulls dropped.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # environment variables or a config file before sharing this script.
    conn = pymysql.connect(
        host="47.122.115.21",
        user="root",
        password="ChenEle__1004",
        database="comments_data",
        charset="utf8mb4"
    )
    try:
        query = "SELECT content FROM mi_9pro WHERE content IS NOT NULL"
        df = pd.read_sql(query, conn)
    finally:
        # Bug fix: close the connection even when the query raises,
        # so a failed read does not leak the MySQL connection.
        conn.close()

    return df["content"].dropna()


# 统计分词词频
# 统计分词词频
def count_word_frequencies(texts, stopwords):
    """Segment each text with jieba and tally word frequencies.

    Args:
        texts: Iterable of strings to segment.
        stopwords: Set of words excluded from the tally.

    Returns:
        Counter: Mapping of word -> occurrence count, with stopwords and
        whitespace-only tokens skipped.
    """
    freq = Counter()
    for sentence in texts:
        for token in jieba.lcut(sentence):
            # Skip whitespace-only tokens and anything on the stopword list.
            if token.strip() and token not in stopwords:
                freq[token] += 1
    return freq


# 保存数据到 Excel 和 CSV
# 保存数据到 Excel 和 CSV
def save_word_frequencies(word_freq):
    """Write word frequencies to CSV and Excel, sorted by count descending.

    Args:
        word_freq: Mapping of word -> count (e.g. a ``Counter``).

    Side effects:
        Creates ``word_frequencies.csv`` (UTF-8 with BOM, for Excel
        compatibility) and ``word_frequencies.xlsx`` in the working directory.
    """
    table = pd.DataFrame(list(word_freq.items()), columns=["词语", "词频"])
    table.sort_values(by="词频", ascending=False, inplace=True)

    table.to_csv("word_frequencies.csv", index=False, encoding="utf-8-sig")
    table.to_excel("word_frequencies.xlsx", index=False)
    print("✅ 词频统计已保存为 word_frequencies.csv 和 word_frequencies.xlsx")


def main():
    """Run the full pipeline: stopwords → DB texts → frequencies → files."""
    stop_set = load_stopwords()
    comment_texts = load_data_from_db()
    freqs = count_word_frequencies(comment_texts, stop_set)
    save_word_frequencies(freqs)


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()