import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from kneed import KneeLocator
import imageio.v2 as imageio


def cluster_analysis():
    """Cluster short Chinese texts with TF-IDF + KMeans and save visualizations.

    Reads the 'text' column of data_orin.xlsx, tokenizes each entry with
    jieba, removes stopwords and single-character tokens, vectorizes with
    TF-IDF, selects the cluster count via the elbow method, then writes an
    SSE curve (SSE.png) and one word-cloud image per cluster
    (Cluster_<i>.png) under ../public/data/img/.
    """
    # Read the Excel file and pull the 'text' column as plain strings.
    file_path = '../public/data/tables/data_orin.xlsx'
    data = pd.read_excel(file_path)
    texts = data['text'].astype(str).tolist()

    def load_stopwords(filepaths):
        """Return the union of stopwords from several newline-delimited files."""
        stopwords = set()
        for filepath in filepaths:
            with open(filepath, 'r', encoding='utf-8') as file:
                stopwords.update(line.strip() for line in file)
        return stopwords

    # Several stopword lists are merged into a single set.
    stopwords_files = [
        '../public/data/stop_words/baidu_stopwords.txt',
        '../public/data/stop_words/cn_stopwords.txt',
        '../public/data/stop_words/hit_stopwords.txt',
        '../public/data/stop_words/scu_stopwords.txt'
    ]
    stopwords = load_stopwords(stopwords_files)

    # Tokenize each post; drop stopwords and single-character tokens.
    cleaned_texts = []
    for text in texts:
        words = jieba.lcut(text)
        filtered_words = [word for word in words
                          if word not in stopwords and len(word) > 1]
        cleaned_texts.append(' '.join(filtered_words))

    # Vectorize the cleaned corpus with TF-IDF.
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(cleaned_texts)

    def find_best_k(X):
        """Pick the cluster count at the elbow (knee) of the SSE curve."""
        sse = []
        # KMeans requires n_clusters <= n_samples, so cap the search range
        # instead of blindly scanning 1..100.
        k_values = list(range(1, min(100, X.shape[0]) + 1))
        for k in k_values:
            kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
            sse.append(kmeans.inertia_)

        # Plot on a dedicated figure and close it so later word-cloud
        # figures start from a clean state.
        plt.figure()
        plt.plot(k_values, sse, marker='o')
        plt.xlabel('Number of clusters')
        plt.ylabel('SSE')
        plt.title('Elbow Method for Optimal k')
        plt.savefig('../public/data/img/SSE.png')
        plt.close()

        # KneeLocator.elbow is None when no clear elbow exists; fall back
        # to a single cluster in that case (max(1, None) would raise).
        kl = KneeLocator(k_values, sse, curve='convex', direction='decreasing')
        best_k = kl.elbow if kl.elbow is not None else 1
        best_k = max(1, best_k)
        print(f"Best number of clusters based on the elbow method: {best_k}")
        return best_k

    best_k = find_best_k(X)

    # Final clustering with the selected number of clusters.
    kmeans = KMeans(n_clusters=best_k, random_state=0).fit(X)
    labels = kmeans.labels_

    # Mask image that shapes each word cloud.
    bg_pic = imageio.imread('R.png')

    # Render and save one word cloud per cluster.
    for i in range(best_k):
        cluster_texts = ' '.join(text for text, label in zip(cleaned_texts, labels)
                                 if label == i)
        # WordCloud.generate raises ValueError on empty input; skip
        # clusters whose documents were emptied by stopword filtering.
        if not cluster_texts.strip():
            continue
        wordcloud = WordCloud(mask=bg_pic, background_color='white', scale=1.5,
                              font_path='simhei.ttf').generate(cluster_texts)
        plt.figure()
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        plt.title(f'Cluster {i}')
        plt.savefig(f'../public/data/img/Cluster_{i}.png')
        plt.close()  # avoid accumulating open figures across clusters
