# Import required libraries
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA


# re: regular-expression operations, e.g. stripping punctuation from text.
# nltk: natural-language toolkit, used here for stopwords and tokenization.
# gensim: used to train the Word2Vec model.
# numpy: numerical computation.
# sklearn: machine-learning library, used for clustering and preprocessing.
# matplotlib: data visualization.
# PCA: principal component analysis, used to reduce dimensionality for plotting.


class TextClustering:
    """Cluster a file of short texts using Word2Vec embeddings and DBSCAN.

    Each line of the input file is treated as one document. Documents are
    cleaned, embedded as the mean of their word vectors, standardized,
    clustered with DBSCAN, and visualized in 2-D via PCA.
    """

    def get_average_vector(self, text, model, vector_size=100):
        """Return the mean word vector of *text* under *model*.

        Args:
            text: whitespace-separated token string.
            model: a trained gensim Word2Vec model (``model.wv`` lookup).
            vector_size: embedding dimensionality; must match the model.

        Returns:
            A numpy array of length *vector_size*. Tokens missing from the
            model vocabulary are skipped; if no token is known, the zero
            vector is returned (avoids division by zero).
        """
        total = np.zeros(vector_size)
        known = 0
        for token in text.split():
            if token in model.wv:
                total += model.wv[token]
                known += 1
        return total / known if known else total

    def preprocess_text(self, text):
        """Lowercase *text*, strip punctuation, and drop English stopwords."""
        text = re.sub(r"[^\w\s]", "", text)  # remove punctuation
        text = text.lower()
        # Build the stopword set once: the original re-evaluated
        # stopwords.words("english") (a list) for every token, making the
        # filter O(tokens * stopwords) instead of O(tokens).
        stop_set = set(stopwords.words("english"))
        tokens = [w for w in word_tokenize(text) if w not in stop_set]
        return " ".join(tokens)

    def __init__(self, filename, input_dir="../InputData/"):
        """Load *input_dir*/*filename*.txt, one document per line.

        Args:
            filename: base name of the .txt file (without extension).
            input_dir: directory containing the file; parameterized (with
                the original hard-coded path as default) so callers are
                not tied to one layout.
        """
        path = input_dir + filename + ".txt"
        with open(path, "r", encoding="utf-8") as fh:
            texts = [line.strip() for line in fh]

        # Build the DataFrame directly from the lines already read.
        # The original re-parsed the file with pd.read_csv(sep="/n"):
        # "/n" was a typo for "\n", and as a multi-character separator it
        # is treated as a regex, mis-splitting any line containing "/n".
        self.data = pd.DataFrame({"text": texts})

        # Preprocess every document once, up front.
        self.processed_texts = [self.preprocess_text(t) for t in texts]

    def word_embedding(self, eps=10, min_samples=300, show_plot=True):
        """Embed, cluster, visualize, and report the loaded texts.

        Args:
            eps: DBSCAN neighborhood radius (default keeps the original
                hard-coded value).
            min_samples: DBSCAN core-point threshold. NOTE(review): the
                original default of 300 is very large for small corpora
                and may mark every point as noise (-1) — tune per dataset.
            show_plot: set False to skip the matplotlib window (useful for
                headless runs).

        Returns:
            The DBSCAN label array (also stored in ``self.data["cluster"]``).
        """
        # Train Word2Vec on the tokenized corpus.
        tokenized_texts = [t.split() for t in self.processed_texts]
        word2vec_model = Word2Vec(
            sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=4
        )

        # One embedding per document: mean of its word vectors.
        features = np.array(
            [
                self.get_average_vector(t, word2vec_model)
                for t in self.processed_texts
            ]
        )

        # Standardize features before density-based clustering.
        features_scaled = StandardScaler().fit_transform(features)

        # Cluster with DBSCAN and map labels back onto the data.
        clusters = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(
            features_scaled
        )
        self.data["cluster"] = clusters

        if show_plot:
            # Project to 2-D with PCA purely for visualization.
            reduced = PCA(n_components=2).fit_transform(features_scaled)
            plt.scatter(reduced[:, 0], reduced[:, 1], c=clusters, cmap="viridis")
            plt.title("Text Clustering Visualization")
            plt.xlabel("PCA Dimension 1")
            plt.ylabel("PCA Dimension 2")
            plt.colorbar()
            plt.show()

        # Report cluster assignments.
        print(self.data)
        return clusters


if __name__ == "__main__":
    # Load the IMDB sentences, then embed and cluster them.
    clustering = TextClustering("imdb_labelled")
    clustering.word_embedding()
