import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import seaborn as sns
import os
import matplotlib as mpl
from sklearn.decomposition import PCA

# Configure Matplotlib so Chinese labels and minus signs render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],  # font with CJK glyph coverage
    'axes.unicode_minus': False,    # keep '-' rendering with a CJK font
})

class UserClusterAnalyzer:
    """Cluster Weibo commenters into behavioral groups.

    Pipeline: merge raw comments with per-comment sentiment scores,
    aggregate per-user activity/sentiment features, reduce them to 2-D
    with PCA, choose the cluster count by silhouette score, run KMeans,
    and write plots plus a text report to disk.
    """

    def __init__(self, data_path):
        """
        Initialize the user cluster analyzer.

        :param data_path: path to the preprocessed comment CSV file
        """
        self.data_path = data_path
        self.df = None          # merged comment + sentiment DataFrame (set by load_data)
        self.features = None    # per-user feature table; after prepare_features: user_id + PCA coords
        self.scaler = StandardScaler()
        self.kmeans = None      # fitted KMeans model (set by perform_clustering)
        self.n_clusters = None  # chosen cluster count (set by determine_optimal_clusters)

    def load_data(self):
        """Load raw comments, join per-comment sentiment results, parse timestamps."""
        # Sentiment results are produced by a sibling 'sentiment' package,
        # located relative to this file rather than the working directory.
        sentiment_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 
                                    'sentiment', 'analysis_results', 'sentiment_analysis_results.csv')
        sentiment_df = pd.read_csv(sentiment_path)

        # Load the original comment data.
        original_df = pd.read_csv(self.data_path)

        # Left join keeps every comment even when it has no sentiment row
        # (missing scores become NaN and are imputed later).
        self.df = pd.merge(original_df, sentiment_df[['comment_id', 'sentiment_score', 'sentiment']], 
                          on='comment_id', how='left')

        # Parse timestamps into datetime objects.
        self.df['created_at'] = pd.to_datetime(self.df['created_at'])

    def prepare_features(self):
        """Build per-user features, weight them, and reduce to 2-D via PCA.

        Leaves ``self.features`` with columns ``user_id``, ``pca1``, ``pca2``.
        """
        # 1. User activity features.
        user_activity = self.df.groupby('user_id').agg({
            'comment_id': 'count',  # number of comments
            'like_counts': 'mean',  # mean likes per comment
            'reply_counts': 'mean',  # mean replies per comment
            'word_count': 'mean'  # mean comment length
        }).reset_index()

        # 2. User sentiment features (extremes included).
        user_sentiment = self.df.groupby('user_id').agg({
            'sentiment_score': ['mean', 'std', 'max', 'min']
        }).reset_index()
        user_sentiment.columns = ['user_id', 'sentiment_mean', 'sentiment_std', 'sentiment_max', 'sentiment_min']

        # 3. Merge activity and sentiment features on user_id.
        self.features = pd.merge(user_activity, user_sentiment, on='user_id')

        # 4. Derived feature combinations.
        self.features['interaction_score'] = self.features['like_counts'] + self.features['reply_counts']
        # 1/(1+std): higher means more consistent sentiment; NaN std (single
        # comment) propagates and is imputed below.
        self.features['sentiment_stability'] = 1 / (1 + self.features['sentiment_std'])
        self.features['content_quality'] = self.features['word_count'] * self.features['interaction_score']
        self.features['sentiment_range'] = self.features['sentiment_max'] - self.features['sentiment_min']

        # 5. Final feature set used for clustering.
        selected_features = [
            'comment_id',
            'interaction_score',
            'sentiment_mean',
            'sentiment_stability',
            'content_quality',
            'sentiment_range'
        ]

        # 6. Impute missing values with each column's median.
        for col in selected_features:
            self.features[col] = self.features[col].fillna(self.features[col].median())

        # 7. Standardize features to zero mean / unit variance.
        self.features[selected_features] = self.scaler.fit_transform(self.features[selected_features])

        # 8. Re-weight features to emphasize interaction and sentiment spread
        #    (weights chosen heuristically; applied after scaling so they act
        #    as relative importances in the PCA/KMeans distance metric).
        weights = {
            'comment_id': 0.8,
            'interaction_score': 2.0,
            'sentiment_mean': 1.5,
            'sentiment_stability': 1.0,
            'content_quality': 1.8,
            'sentiment_range': 1.5
        }
        for feature, weight in weights.items():
            self.features[feature] = self.features[feature] * weight

        # 9. PCA to 2 components, for clustering and plotting.
        pca = PCA(n_components=2, random_state=42)
        pca_features = pca.fit_transform(self.features[selected_features])
        self.features['pca1'] = pca_features[:, 0]
        self.features['pca2'] = pca_features[:, 1]
        # Keep only user_id and the reduced coordinates for clustering.
        self.features = self.features[['user_id', 'pca1', 'pca2']]

    def determine_optimal_clusters(self, max_clusters=10):
        """Scan cluster counts, plot silhouette scores, and pick the best k.

        :param max_clusters: upper bound on the cluster counts to try
        :raises ValueError: if there are too few users to evaluate k >= 2
        """
        silhouette_scores = []
        X = self.features.drop('user_id', axis=1)
        # Silhouette is defined for 2 <= k <= n_samples - 1; cap the scan so
        # small datasets fail with a clear error instead of a sklearn crash.
        max_clusters = min(max_clusters, len(X) - 1)
        for n_clusters in range(2, max_clusters + 1):
            # n_init pinned: the sklearn default changed to 'auto' in 1.4,
            # which would silently alter results across versions.
            kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
            cluster_labels = kmeans.fit_predict(X)
            score = silhouette_score(X, cluster_labels)
            silhouette_scores.append(score)
        if not silhouette_scores:
            raise ValueError(
                "Not enough users to evaluate at least 2 clusters "
                f"(got {len(X)} samples)")
        # Plot the silhouette-score curve.
        plt.figure(figsize=(10, 6))
        plt.plot(range(2, max_clusters + 1), silhouette_scores, marker='o')
        plt.title('不同聚类数的轮廓系数')
        plt.xlabel('聚类数')
        plt.ylabel('轮廓系数')
        plt.grid(True)
        plt.savefig('silhouette_scores.png', dpi=300, bbox_inches='tight')
        plt.close()
        # Best k = argmax of silhouette; +2 maps list index back to k.
        self.n_clusters = np.argmax(silhouette_scores) + 2
        print(f"最优聚类数: {self.n_clusters}")

    def perform_clustering(self):
        """Fit KMeans with the chosen k and label each user."""
        # n_init pinned for cross-version reproducibility (see above).
        self.kmeans = KMeans(n_clusters=self.n_clusters, n_init=10, random_state=42)
        self.features['cluster'] = self.kmeans.fit_predict(self.features.drop('user_id', axis=1))

    def analyze_clusters(self):
        """Summarize clusters in PCA space and save a scatter plot.

        :return: DataFrame of per-cluster mean/std for pca1 and pca2
        """
        # 1. Per-cluster statistics in the reduced space.
        cluster_stats = self.features.groupby('cluster').agg({
            'pca1': ['mean', 'std'],
            'pca2': ['mean', 'std']
        }).round(3)

        # 2. Scatter plot of users in PCA space, colored by cluster.
        plt.figure(figsize=(8, 6))
        sns.scatterplot(x='pca1', y='pca2', hue='cluster', data=self.features, palette='Set1')
        plt.title('PCA降维后聚类分布')
        plt.xlabel('PCA1')
        plt.ylabel('PCA2')
        plt.legend(title='Cluster')
        plt.tight_layout()
        plt.savefig('cluster_pca_scatter.png', dpi=300, bbox_inches='tight')
        plt.close()

        return cluster_stats

    def save_results(self, cluster_stats):
        """Write the cluster assignments and a text report to analysis_results/.

        :param cluster_stats: per-cluster statistics from analyze_clusters()
        """
        # Create the output directory if needed.
        os.makedirs('analysis_results', exist_ok=True)

        # Save per-user cluster assignments.
        self.features.to_csv('analysis_results/user_clusters.csv', index=False)

        # Generate the analysis report.
        with open('analysis_results/cluster_analysis_report.txt', 'w', encoding='utf-8') as f:
            f.write("用户聚类分析报告\n")
            f.write("="*50 + "\n\n")

            f.write("1. 聚类数量\n")
            f.write(f"聚类数: {self.n_clusters}\n\n")

            f.write("2. 聚类特征统计（PCA空间）\n")
            f.write(str(cluster_stats) + "\n\n")

            f.write("3. 聚类描述\n")
            for cluster in range(self.n_clusters):
                cluster_data = self.features[self.features['cluster'] == cluster]
                f.write(f"\n聚类 {cluster}:\n")
                f.write(f"- 用户数量: {len(cluster_data)}\n")
                f.write(f"- PCA1均值: {cluster_data['pca1'].mean():.2f}\n")
                f.write(f"- PCA2均值: {cluster_data['pca2'].mean():.2f}\n")

    def run_analysis(self):
        """Run the full analysis pipeline end to end."""
        print("开始用户聚类分析...")
        self.load_data()
        self.prepare_features()
        self.determine_optimal_clusters()
        self.perform_clustering()
        cluster_stats = self.analyze_clusters()
        self.save_results(cluster_stats)
        print("\n用户聚类分析完成！结果已保存到analysis_results目录。")

if __name__ == "__main__":
    # Example usage: point the analyzer at the preprocessed comment dump
    # and run the full pipeline.
    cluster_tool = UserClusterAnalyzer('../../weibo_spider/data_processing/preprocessed_comments.csv')
    cluster_tool.run_analysis()