import pymysql
import jieba
from jieba import analyse
from snownlp import SnowNLP
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import numpy as np

# 1. Connect to MySQL and read the `id` / `content` columns of table `mi_9`.
# NOTE(review): credentials are hard-coded in source — move host/user/password
# to environment variables or a config file before sharing or deploying.
conn = pymysql.connect(
    host="47.122.115.21",
    user="root",
    password="ChenEle__1004",
    database="comments_data",
    charset="utf8mb4"
)
cursor = conn.cursor()

cursor.execute("SELECT id, content FROM mi_9")
rows = cursor.fetchall()

# All rows are now materialized in memory, so the DB resources can be
# released immediately (the original leaked both cursor and connection).
cursor.close()
conn.close()

# 2. Preprocessing: drop rows whose comment text is empty/NULL, in a single
#    pass (the original scanned `rows` twice with the same condition and
#    declared an unused `results` list, removed here).
_kept = [(row_id, content) for row_id, content in rows if content]
ids = [pair[0] for pair in _kept]   # row ids, index-aligned with `data`
data = [pair[1] for pair in _kept]  # non-empty comment texts

# **3. 分词**
def tokenize(text):
    """Segment *text* into a list of word tokens using jieba."""
    return jieba.lcut(text)

# 3. Tokenize every comment. Keep both the raw token lists and a
#    space-joined form (the input format scikit-learn vectorizers expect).
tokenized_texts = [tokenize(comment) for comment in data]
tokenized_texts_str = [" ".join(tokens) for tokens in tokenized_texts]

# 4. TF-IDF term weighting, vocabulary capped at the top 1000 terms.
tfidf = TfidfVectorizer(max_features=1000)
X_tfidf = tfidf.fit_transform(tokenized_texts_str)

# Per-comment keywords via jieba's built-in TF-IDF extractor, top 5 terms
# each (rewritten from a manual append loop into a list comprehension).
keywords_list = [
    analyse.extract_tags(text, topK=5, withWeight=False)
    for text in tokenized_texts_str
]

# 5. Topic modelling: fit LDA on the TF-IDF document-term matrix.
num_topics = 5  # number of latent topics to extract
lda = LatentDirichletAllocation(n_components=num_topics, random_state=42)
X_lda = lda.fit_transform(X_tfidf)

# Summarise each fitted topic by its 10 highest-weighted vocabulary terms.
feature_names = tfidf.get_feature_names_out()
topics = []
for topic_idx, topic in enumerate(lda.components_):
    top_indices = topic.argsort()[::-1][:10]  # descending by weight
    top_words = [feature_names[i] for i in top_indices]
    topics.append(" ".join(top_words))
    print(f"主题 {topic_idx+1}: {top_words}")

# Assign each comment its dominant topic (column with the highest LDA
# weight), then record that topic's top-5 keywords for the comment.
# (Rewritten: the original `enumerate` bound an unused `idx`, and the
# append loop is now a list comprehension.)
dominant_topics = np.argmax(X_lda, axis=1)
topic_words_list = [
    topics[topic_id].split(" ")[:5]  # first 5 of the topic's 10 keywords
    for topic_id in dominant_topics
]

# 6. Sentiment scoring: SnowNLP yields a positivity probability in [0, 1].
sentiments = [SnowNLP(comment).sentiments for comment in data]

# 7. Collect every per-comment artefact into one table, columns
#    index-aligned with `data`.
columns = {
    "content": data,
    "jieba_word": [" ".join(tokens) for tokens in tokenized_texts],
    "key_word": [" ".join(tokens) for tokens in keywords_list],
    "topic_word": [" ".join(tokens) for tokens in topic_words_list],
    "sentiment_score": sentiments,
}
df = pd.DataFrame(columns)

# 8. Export the results to an Excel workbook.
output_file = "text_analysis_results.xlsx"
df.to_excel(output_file, index=False)

print(f"数据分析完成，结果已保存为 {output_file}")