import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import seaborn as sns
import os
from datetime import datetime, timedelta

# Configure matplotlib so Chinese labels render (SimHei font) and the
# minus sign is not replaced by a missing glyph on negative axis ticks.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class UserClusterAnalyzer:
    """K-means clustering of Weibo commenters on sentiment/activity features.

    Full pipeline (see run_analysis): load and merge comment + sentiment
    data, build a per-user feature matrix, pick a cluster count from
    silhouette and seed-stability curves, fit K-means, summarize clusters,
    and write plots/CSVs/a text report to disk.
    """

    def __init__(self, data_path: str):
        """
        Initialize the user clustering analyzer.
        :param data_path: path to the preprocessed comment data file (CSV)
        """
        self.data_path = data_path
        self.df = None           # merged comment+sentiment DataFrame (set in load_data)
        self.features = None     # standardized per-user feature table (set in prepare_features)
        self.scaler = StandardScaler()
        self.kmeans = None       # fitted KMeans model (set in perform_clustering)
        self.n_clusters = None   # selected cluster count (set in determine_optimal_clusters)
        
    def load_data(self):
        """Load the comments, attach per-comment sentiment scores, parse timestamps."""
        # Load sentiment analysis results from the sibling 'sentiment' package
        # (path resolved relative to this file's parent directory).
        sentiment_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 
                                    'sentiment', 'analysis_results', 'sentiment_analysis_results.csv')
        sentiment_df = pd.read_csv(sentiment_path)
        
        # Load the raw preprocessed comment data
        self.df = pd.read_csv(self.data_path)
        
        # Left-join sentiment scores by comment_id; comments without a
        # matching score keep NaN in sentiment_score.
        self.df = pd.merge(self.df, sentiment_df[['comment_id', 'sentiment_score']], 
                          on='comment_id', how='left')
        
        # Parse comment timestamps into datetimes for the activity features
        self.df['created_at'] = pd.to_datetime(self.df['created_at'])
        
    def prepare_features(self):
        """Build and standardize the per-user feature matrix used for clustering."""
        # 1. Per-user activity aggregation. The elapsed-hours denominator is
        #    offset by +1 so single-comment users do not divide by zero.
        user_activity = self.df.groupby('user_id').agg({
            'created_at': lambda x: len(x) / ((x.max() - x.min()).total_seconds() / 3600 + 1),  # comments per hour
            'comment_id': 'count',  # total number of comments
            'like_counts': 'mean',  # average likes received
            'reply_counts': 'mean',  # average replies received
            'word_count': 'mean',  # average comment length
            'sentiment_score': ['mean', 'std']  # sentiment mean and standard deviation
        }).reset_index()
        
        # Flatten the aggregated (MultiIndex) columns to plain names; the
        # order must match the agg spec above exactly.
        user_activity.columns = ['user_id', 'comments_per_hour', 'total_comments', 
                               'avg_likes', 'avg_replies', 'avg_word_count',
                               'sentiment_mean', 'sentiment_std']
        
        # 2. Interaction-influence score: simple mean of avg likes and avg replies
        user_activity['interaction_score'] = (user_activity['avg_likes'] + user_activity['avg_replies']) / 2
        
        # 3. Emotional stability: 1/(1+std). NaN std (single-comment users)
        #    propagates here, but this column is not among selected_features.
        user_activity['sentiment_stability'] = 1 / (1 + user_activity['sentiment_std'])
        
        # 4. Final feature set used for clustering
        selected_features = [
            'sentiment_mean',  # dominant sentiment tendency
            'interaction_score',  # interaction influence
            'comments_per_hour',  # commenting activity level
            'avg_word_count'  # information density of comments
        ]
        
        # 5. Impute missing feature values with the column median
        for col in selected_features:
            user_activity[col] = user_activity[col].fillna(user_activity[col].median())
        
        # 6. Z-score standardize the selected features (user_id kept as a key column)
        self.features = user_activity[['user_id'] + selected_features].copy()
        self.features[selected_features] = self.scaler.fit_transform(self.features[selected_features])
        
    def determine_optimal_clusters(self, max_clusters=10, stability_trials=5):
        """Choose the cluster count via silhouette score plus a seed-stability check.

        For each candidate k in 2..max_clusters: score one fixed-seed fit,
        then average silhouette over several differently seeded fits as a
        rough stability proxy. k maximizing (silhouette * stability) wins.
        Also saves the two evaluation curves to cluster_evaluation.png.
        :param max_clusters: largest candidate cluster count to evaluate
        :param stability_trials: number of random seeds per k for the stability run
        """
        X = self.features.drop('user_id', axis=1)
        silhouette_scores = []
        stability_scores = []
        
        for n_clusters in range(2, max_clusters + 1):
            # Silhouette score of a single fixed-seed fit
            kmeans = KMeans(n_clusters=n_clusters, random_state=42)
            cluster_labels = kmeans.fit_predict(X)
            silhouette_scores.append(silhouette_score(X, cluster_labels))
            
            # Stability proxy: mean silhouette across several random seeds
            stability_trial_scores = []
            for trial in range(stability_trials):
                kmeans_trial = KMeans(n_clusters=n_clusters, random_state=trial)
                labels_trial = kmeans_trial.fit_predict(X)
                stability_trial_scores.append(silhouette_score(X, labels_trial))
            stability_scores.append(np.mean(stability_trial_scores))
        
        # Plot the two evaluation curves side by side
        plt.figure(figsize=(12, 5))
        
        plt.subplot(1, 2, 1)
        plt.plot(range(2, max_clusters + 1), silhouette_scores, marker='o')
        plt.title('轮廓系数曲线')
        plt.xlabel('聚类数')
        plt.ylabel('轮廓系数')
        plt.grid(True)
        
        plt.subplot(1, 2, 2)
        plt.plot(range(2, max_clusters + 1), stability_scores, marker='o', color='orange')
        plt.title('稳定性实验曲线')
        plt.xlabel('聚类数')
        plt.ylabel('平均轮廓系数')
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig('cluster_evaluation.png', dpi=300, bbox_inches='tight')
        plt.close()
        
        # Pick k maximizing silhouette * stability; +2 offsets argmax because
        # candidates start at k=2.
        combined_scores = np.array(silhouette_scores) * np.array(stability_scores)
        self.n_clusters = np.argmax(combined_scores) + 2
        print(f"最优聚类数: {self.n_clusters}")
        
    def perform_clustering(self):
        """Fit K-means with the chosen k and attach cluster labels to self.features."""
        X = self.features.drop('user_id', axis=1)
        self.kmeans = KMeans(n_clusters=self.n_clusters, random_state=42)
        self.features['cluster'] = self.kmeans.fit_predict(X)
        
    def analyze_clusters(self):
        """Summarize per-cluster feature statistics and save box plots.

        NOTE(review): statistics are computed on the *standardized* feature
        values (z-scores), not on the raw scale.
        :return: DataFrame with per-cluster mean/std of each clustering feature
        """
        # Per-cluster mean/std of each clustering feature
        cluster_stats = self.features.groupby('cluster').agg({
            'sentiment_mean': ['mean', 'std'],
            'interaction_score': ['mean', 'std'],
            'comments_per_hour': ['mean', 'std'],
            'avg_word_count': ['mean', 'std']
        }).round(3)
        
        # Box plot of each feature's per-cluster distribution
        plt.figure(figsize=(15, 10))
        
        features = ['sentiment_mean', 'interaction_score', 'comments_per_hour', 'avg_word_count']
        for i, feature in enumerate(features, 1):
            plt.subplot(2, 2, i)
            sns.boxplot(x='cluster', y=feature, data=self.features)
            plt.title(f'{feature} 分布')
            plt.xlabel('聚类')
            plt.ylabel(feature)
        
        plt.tight_layout()
        plt.savefig('cluster_feature_distribution.png', dpi=300, bbox_inches='tight')
        plt.close()
        
        return cluster_stats
        
    def save_results(self, cluster_stats):
        """Write cluster assignments, a merged results CSV, and a text report.

        :param cluster_stats: per-cluster statistics from analyze_clusters
        """
        os.makedirs('analysis_results', exist_ok=True)
        
        # Per-user cluster assignments (standardized features + cluster label)
        self.features.to_csv('analysis_results/user_clusters_v2.csv', index=False)
        
        # Reload the per-comment sentiment results (same file as in load_data)
        sentiment_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 
                                    'sentiment', 'analysis_results', 'sentiment_analysis_results.csv')
        sentiment_df = pd.read_csv(sentiment_path)
        
        # Attach each comment's user cluster; assumes the sentiment CSV also
        # carries a user_id column — TODO confirm against that file's schema.
        merged_df = pd.merge(sentiment_df, 
                           self.features[['user_id', 'cluster']], 
                           on='user_id', 
                           how='left')
        
        # Save the merged per-comment results
        merged_df.to_csv('analysis_results/merged_cluster_sentiment_results.csv', index=False)
        
        # Write a human-readable analysis report (report text is intentionally
        # in Chinese; means below are of standardized values).
        with open('analysis_results/cluster_analysis_report_v2.txt', 'w', encoding='utf-8') as f:
            f.write("用户聚类分析报告\n")
            f.write("="*50 + "\n\n")
            
            f.write("1. 聚类数量\n")
            f.write(f"聚类数: {self.n_clusters}\n\n")
            
            f.write("2. 聚类特征统计\n")
            f.write(str(cluster_stats) + "\n\n")
            
            f.write("3. 聚类描述\n")
            for cluster in range(self.n_clusters):
                cluster_data = self.features[self.features['cluster'] == cluster]
                f.write(f"\n聚类 {cluster}:\n")
                f.write(f"- 用户数量: {len(cluster_data)}\n")
                f.write(f"- 情绪倾向均值: {cluster_data['sentiment_mean'].mean():.2f}\n")
                f.write(f"- 互动影响力均值: {cluster_data['interaction_score'].mean():.2f}\n")
                f.write(f"- 评论活跃度均值: {cluster_data['comments_per_hour'].mean():.2f}\n")
                f.write(f"- 表达信息密度均值: {cluster_data['avg_word_count'].mean():.2f}\n")
            
    def run_analysis(self):
        """Run the complete clustering pipeline end to end and save all outputs."""
        print("开始用户聚类分析...")
        self.load_data()
        self.prepare_features()
        self.determine_optimal_clusters()
        self.perform_clustering()
        cluster_stats = self.analyze_clusters()
        self.save_results(cluster_stats)
        print("\n用户聚类分析完成！结果已保存到analysis_results目录。")

if __name__ == "__main__":
    # Script entry point: cluster users from the preprocessed comment dataset.
    data_file = '../../weibo_spider/data_processing/preprocessed_comments.csv'
    UserClusterAnalyzer(data_file).run_analysis()