import pandas as pd
import re
import jieba
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from wordcloud import WordCloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Configure matplotlib so Chinese (CJK) glyphs and the minus sign render correctly.
plt.rcParams.update({
    "font.sans-serif": ["SimHei"],   # SimHei provides CJK glyphs
    "axes.unicode_minus": False,     # avoid garbled minus signs
    "font.family": ["SimHei"],
})

# ---------------------- 1. Data loading & exploration ----------------------
df = pd.read_csv("raw_comments.csv", encoding="utf-8")
df_before_clean = df.copy()  # pristine copy kept for before/after comparison
print(f"原始数据量: {len(df)} 条")
print(f"缺失值数量: {df['comment'].isnull().sum()} 条\n")

# ---------------------- 2. Data cleaning ----------------------
def clean_text(text):
    """Clean one raw comment: strip URLs and symbols, normalize whitespace.

    Keeps Chinese characters, ASCII letters/digits and a few punctuation
    marks (！？。，, and space). Non-string input (e.g. NaN coming from a
    pandas column with missing values) yields an empty string instead of
    raising TypeError.
    """
    if not isinstance(text, str):  # NaN/None guard — raw column has missing values
        return ""
    text = re.sub(r'http\S+|www\S+', '', text)  # remove URLs
    # Keep Chinese, English letters, digits and selected punctuation only.
    text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9！？。，, ]', '', text)
    # Raw string for \s+ (non-raw '\s' is an invalid escape on modern Python).
    return re.sub(r'\s+', ' ', text).strip()  # collapse runs of spaces, trim ends


# fillna("") keeps NaN comments from crashing the cleaner while preserving row order.
df["clean_comment"] = df["comment"].fillna("").apply(clean_text)
# Bug fix: clean_text returns "" (a string), never NaN, so the old
# dropna(subset=["clean_comment"]) removed nothing. Filter empty strings
# explicitly to actually drop rows that are empty after cleaning.
df = df[df["clean_comment"].str.len() > 0]
print(f"清洗后数据量: {len(df)} 条\n")

# ---------------------- 3. Tokenization & stopword removal ----------------------
stopfile = r'hit_stopwords.txt'
# Build the stopword set line by line; split() handles any surrounding whitespace.
with open(stopfile, encoding="utf-8") as fh:
    stopwords = {word for line in fh for word in line.split()}


def tokenize(text):
    """Segment text with jieba, dropping stopwords and single-char tokens."""
    kept = (w for w in jieba.lcut(text) if len(w) > 1 and w not in stopwords)
    return " ".join(kept)


df["tokens"] = df["clean_comment"].apply(tokenize)


# ---------------------- 4. Cleaning-effect visualization ----------------------
def visualize_clean_effect(df_before, df_after, column_before="comment", column_after="tokens"):
    """Render a 2x2 figure comparing text before and after cleaning.

    Top row: top-10 word-frequency bar charts; bottom row: word clouds.

    Args:
        df_before: DataFrame holding the raw text.
        df_after: DataFrame holding the cleaned/tokenized text.
        column_before: raw-text column name in ``df_before``.
        column_after: tokenized-text column name in ``df_after``.
    """

    # ---------------------- Panels 1-2: top-word frequency comparison ----------------------
    def plot_top_words(data, title, top_n=10):
        # NOTE(review): " ".join assumes every entry is a str; NaN rows in the
        # raw column would raise here — confirm upstream handling.
        words = " ".join(data).split()
        freq = Counter(words).most_common(top_n)
        df_plot = pd.DataFrame(freq, columns=["词语", "频率"])

        # Pink for the "before" chart, blue for "after" (keyed on the title text).
        plt.bar(df_plot["词语"], df_plot["频率"], color="#FF9999" if "清洗前" in title else "#66CCFF")
        plt.title(title)
        plt.xticks(rotation=45)
        plt.grid(axis="y", alpha=0.3)

    # Top-left: high-frequency words before cleaning
    plt.figure(figsize=(18, 12))
    plt.subplot(2, 2, 1)
    plot_top_words(df_before[column_before], "清洗前高频词（Top 10）", top_n=10)

    # Top-right: high-frequency words after cleaning
    plt.subplot(2, 2, 2)
    plot_top_words(df_after[column_after], "清洗后高频词（Top 10）", top_n=10)

    # ---------------------- Panels 3-4: word-cloud comparison ----------------------
    def generate_wordcloud(texts, title):
        text = " ".join(texts)
        wordcloud = WordCloud(
            font_path="simhei.ttf",  # a CJK-capable font is required (adjust path per system)
            background_color="white",
            max_words=200,
            width=800, height=400
        ).generate(text)

        plt.imshow(wordcloud)
        plt.title(title)
        plt.axis("off")

    plt.subplot(2, 2, 3)
    generate_wordcloud(df_before[column_before], "清洗前词云图")

    plt.subplot(2, 2, 4)
    generate_wordcloud(df_after[column_after], "清洗后词云图")

    plt.tight_layout()
    plt.show()


# ---------------------- 5. Model-effect validation ----------------------
def evaluate_model(texts, labels):
    """Train and score a TF-IDF + logistic-regression classifier.

    Args:
        texts: iterable of document strings.
        labels: class labels aligned with ``texts``.

    Returns:
        Hold-out accuracy on a fixed 20% test split (random_state=42).
    """
    X = TfidfVectorizer().fit_transform(texts)
    X_train, X_test, y_train, y_test = train_test_split(
        X, labels, test_size=0.2, random_state=42
    )
    # max_iter raised from the default 100: lbfgs frequently fails to
    # converge on sparse TF-IDF features, leaving an under-trained model
    # and emitting ConvergenceWarning.
    model = LogisticRegression(max_iter=1000)
    model.fit(X_train, y_train)
    return accuracy_score(y_test, model.predict(X_test))


# Compute model accuracy before vs. after cleaning.
# The raw column may contain NaN (see the missing-value report above);
# fillna("") keeps TfidfVectorizer from raising while preserving the
# row/label alignment that dropna would break.
acc_raw = evaluate_model(df_before_clean["comment"].fillna(""), df_before_clean["label"])
acc_clean = evaluate_model(df["tokens"], df["label"])


# ---------------------- 6. Result summary & display ----------------------
# Print summary statistics
def analyze_text(df, column):
    """Compute simple lexical statistics for a whitespace-tokenized column.

    Args:
        df: DataFrame holding the text.
        column: name of the column to analyze.

    Returns:
        dict with the mean token count per row ("平均词数") and the ratio of
        unique tokens to total tokens ("唯一词比例"), division-by-zero guarded.
    """
    # NaN-safe: missing entries are treated as empty documents instead of
    # crashing on float.split() (the raw column can contain NaN).
    texts = df[column].fillna("").astype(str).tolist()
    word_lengths = [len(text.split()) for text in texts]
    return {
        "平均词数": np.mean(word_lengths),
        "唯一词比例": len(set(" ".join(texts).split())) / max(sum(word_lengths), 1),
    }


# Lexical statistics before vs. after cleaning.
stats_before = analyze_text(df_before_clean, "comment")
stats_after = analyze_text(df, "tokens")

print("---------------------- 统计指标对比 ----------------------")
print(f"清洗前平均词数: {stats_before['平均词数']:.2f} | 唯一词比例: {stats_before['唯一词比例']:.4f}")
print(f"清洗后平均词数: {stats_after['平均词数']:.2f} | 唯一词比例: {stats_after['唯一词比例']:.4f}\n")

print("---------------------- 模型性能对比 ----------------------")
print(f"清洗前准确率: {acc_raw:.4f} | 清洗后准确率: {acc_clean:.4f}")
print(f"准确率提升: {acc_clean - acc_raw:.4f}\n")

# Generate the before/after visual report (bar charts + word clouds).
visualize_clean_effect(df_before_clean, df, column_before="comment", column_after="tokens")

# Persist the cleaned data (tokens + labels) for downstream use.
df[["tokens", "label"]].to_csv("cleaned_comments.csv", index=False, encoding="utf-8")