import re
import os
import json
import jieba
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn import metrics
import joblib

# ------ Data loading ------
# Raw corpus CSV has no header row: column 0 = article text, column 1 = label.
data = pd.read_csv('./data/new.csv', encoding='utf-8', header=None)
data.columns = ['news', 'label']

# ------ Text cleaning ------
data_dup = data.drop_duplicates(subset=['news'])  # drop exact-duplicate articles
data_clean = data_dup.copy()


def _scrub_text(raw):
    """Normalise one article: keep only Chinese characters and whitespace,
    strip runs of 'x' (a masking artifact in this dataset), then collapse
    internal whitespace and trim the ends."""
    kept = re.sub(r'[^\u4e00-\u9fa5\s]', '', raw)  # keep CJK + whitespace only
    kept = re.sub(r'x+', '', kept)                 # remove masked 'x' runs
    return re.sub(r'\s+', ' ', kept).strip()       # collapse spaces, trim


data_clean['news'] = data_clean['news'].astype('str').apply(_scrub_text)
# Drop articles that are too short after cleaning (5 characters or fewer).
data_clean = data_clean[data_clean['news'].str.len() > 5]


# ------ Stop words & user dictionary ------
stopwords = set()
# Load the stop-word file (GBK-encoded), one word per line, skipping blanks.
with open('./data/stopword.txt', 'r', encoding='gbk') as f:
    for line in f:
        word = line.strip()
        if word:
            stopwords.add(word)
# Supplement the file with common function words not covered by it.
stopwords.update(['的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
                  '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这', '啦'])
# User dictionary keeps domain terms (e.g. 升学率, 锦标赛) as single tokens.
jieba.load_userdict('./data/newdic1.txt')

# Token filtering rules
def tokenize(text):
    """Segment *text* with jieba and keep only informative tokens.

    A token is kept when it is not a stop word, is longer than one
    character, and consists purely of Chinese characters.
    """
    kept = []
    for token in jieba.cut(text):  # default (accurate) segmentation mode
        if token in stopwords:
            continue
        if len(token) <= 1:
            continue
        if re.fullmatch(r'[\u4e00-\u9fa5]+', token):  # pure-Chinese only
            kept.append(token)
    return kept
# Tokenise every article, then drop rows whose token list came out empty.
data_clean['tokens'] = data_clean['news'].apply(tokenize)
nonempty = data_clean['tokens'].map(len) > 0
data_clean = data_clean[nonempty]


# -------------------------- 3. 标签处理与数据集划分（分层抽样） --------------------------
# 统一标签为数字（与原逻辑一致）
reps = {'教育': '1', '体育': '2', '健康': '3', '旅游': '4'}
data_clean['label'] = data_clean['label'].map(reps).astype(int)  # 转为int类型，避免字符串问题

# 构建语料库（将分词结果拼接为字符串）
corpus = [' '.join(tokens) for tokens in data_clean['tokens']]
labels = data_clean['label'].values

# 分层抽样划分训练集（80%）和测试集（20%），确保类别比例一致
train_corpus, test_corpus, train_labels, test_labels = train_test_split(
    corpus, labels, test_size=0.2, random_state=42, stratify=labels
)


# ------特征提取
# TfidfVectorizer
tfidf = TfidfVectorizer(
    ngram_range=(1, 2),  # 包含单字和双字
    min_df=3,  # 过滤出现次数<2的词（减少噪音）
    max_df=0.96,  # 过滤在95%以上文档中出现的词（如通用词）
    max_features=10000  # 保留top10000个词（控制维度）
)
# 关键修正：训练集fit，测试集仅transform（确保特征空间一致！原代码此处错误）
train_tfidf = tfidf.fit_transform(train_corpus)
test_tfidf = tfidf.transform(test_corpus)  # 用训练集的词汇表转换测试集


# ------降维、选择
# 5.1 特征选择：保留与标签关联性强的特征
selector = SelectKBest(mutual_info_classif, k=3000)  # 选择top3000个特征
train_selected = selector.fit_transform(train_tfidf, train_labels)
test_selected = selector.transform(test_tfidf)
# 5.2 PCA降维：减少维度灾难（保留93%方差）
pca = PCA(n_components=0.93, random_state=42)
train_pca = pca.fit_transform(train_selected.toarray())  # 稠密矩阵
test_pca = pca.transform(test_selected.toarray())

print(f"降维后维度：{train_pca.shape[1]}")


# ------堆量参数区
# KMeans参数：初始化、迭代次数
kmeans = KMeans(
    n_clusters=4,  # 4个类别
    init='k-means++',  # 智能初始化中心
    n_init=100,  # 100次初始化取最优结果
    max_iter=9000,  # 增加迭代次数
    random_state=42,
    algorithm='elkan'  # 高效算法（适用于稠密数据）
)
# 训练
kmeans.fit(train_pca)


# ------数据区
train_pred = kmeans.labels_
print("\n训练集指标：")
print(f"ARI: {metrics.adjusted_rand_score(train_labels, train_pred):.4f}")
print(f"AMI: {metrics.adjusted_mutual_info_score(train_labels, train_pred):.4f}")
print(f"调和平均: {metrics.v_measure_score(train_labels, train_pred):.4f}")

test_pred = kmeans.predict(test_pca)
print("\n测试集指标：")
print(f"ARI: {metrics.adjusted_rand_score(test_labels, test_pred):.4f}")
print(f"AMI: {metrics.adjusted_mutual_info_score(test_labels, test_pred):.4f}")
print(f"调和平均: {metrics.adjusted_mutual_info_score(test_labels, test_pred):.4f}")
