#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
学生社交媒体数据增强分析 - 整合旧版本所有功能
包含：机器学习、深度学习、聚类分析、特征重要性、国家对比等
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import os
import json
from datetime import datetime

# 机器学习相关
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, accuracy_score
from sklearn.inspection import permutation_importance
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

# 深度学习相关（可选）
try:
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
    from tensorflow.keras.callbacks import EarlyStopping
    TENSORFLOW_AVAILABLE = True
except ImportError:
    TENSORFLOW_AVAILABLE = False
    print("⚠️ TensorFlow未安装，跳过深度学习分析")

warnings.filterwarnings('ignore')

# Configure matplotlib with fonts that can render CJK (Chinese) labels;
# DejaVu Sans is the fallback when no CJK font is installed.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
# CJK fonts often lack the Unicode minus glyph; use ASCII '-' instead.
plt.rcParams['axes.unicode_minus'] = False

class EnhancedSocialMediaAnalyzer:
    """End-to-end analysis pipeline for the student social-media dataset.

    Stages (see run_full_analysis): load -> preprocess -> exploratory stats
    -> platform/country comparisons -> classic machine learning -> optional
    deep learning (only when TensorFlow is importable) -> K-means clustering
    -> chart generation -> Markdown report -> JSON dump of all results.
    """

    def __init__(self, data_path):
        """Remember the CSV path, initialise state holders and ensure the
        output directories exist.

        Args:
            data_path: path to the dataset CSV file.
        """
        self.data_path = data_path
        self.df = None                 # raw dataset, set by load_data()
        self.processed_df = None       # copy with label-encoded categoricals
        self.scaler = StandardScaler()
        self.label_encoders = {}       # column name -> fitted LabelEncoder
        self.models = {}               # model name -> fitted estimator
        self.analysis_results = {}     # accumulated results (report + JSON)
        self.feature_importance = {}   # list of {'feature','importance'} records

        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        for dir_name in ['static/charts', 'static/models', 'reports']:
            os.makedirs(dir_name, exist_ok=True)

    def load_data(self):
        """Load the CSV into self.df and record basic dataset facts."""
        print("📊 加载数据集...")
        self.df = pd.read_csv(self.data_path)
        print(f"✅ 数据加载成功！形状: {self.df.shape}")

        self.analysis_results['data_info'] = {
            'total_samples': len(self.df),
            'total_features': len(self.df.columns),
            'features': list(self.df.columns),
            'missing_values': self.df.isnull().sum().to_dict()
        }

    def preprocess_data(self):
        """Label-encode categorical columns into new ``*_encoded`` columns."""
        print("🔧 进行数据预处理...")
        self.processed_df = self.df.copy()

        # Columns used later as model features and (last one) as the target.
        categorical_cols = ['Gender', 'Academic_Level', 'Country', 'Most_Used_Platform',
                           'Relationship_Status', 'Affects_Academic_Performance']

        for col in categorical_cols:
            if col in self.processed_df.columns:
                le = LabelEncoder()
                self.processed_df[col + '_encoded'] = le.fit_transform(self.processed_df[col])
                # Keep the fitted encoder so encoded values can be mapped back.
                self.label_encoders[col] = le

        print("✅ 数据预处理完成")

    def exploratory_analysis(self):
        """Compute descriptive stats, categorical distributions and the
        numeric correlation matrix; store everything in analysis_results."""
        print("🔍 进行探索性数据分析...")

        # Numeric columns expected in the dataset.
        numeric_cols = ['Age', 'Avg_Daily_Usage_Hours', 'Sleep_Hours_Per_Night',
                       'Mental_Health_Score', 'Conflicts_Over_Social_Media', 'Addicted_Score']

        basic_stats = self.df[numeric_cols].describe()

        # Frequency tables for the main categorical columns.
        categorical_distributions = {}
        for col in ['Gender', 'Academic_Level', 'Most_Used_Platform', 'Relationship_Status']:
            categorical_distributions[col] = self.df[col].value_counts().to_dict()

        # Pearson correlations between the numeric columns.
        correlation_matrix = self.df[numeric_cols].corr()

        self.analysis_results['exploratory'] = {
            'basic_stats': basic_stats.to_dict(),
            'categorical_distributions': categorical_distributions,
            'correlations': correlation_matrix.to_dict()
        }

        print("✅ 探索性分析完成")

    def platform_analysis(self):
        """Aggregate usage / wellbeing metrics per social-media platform.

        Returns:
            DataFrame indexed by platform with Chinese-labelled stat columns.
        """
        print("📱 进行平台深度分析...")

        platform_stats = self.df.groupby('Most_Used_Platform').agg({
            'Student_ID': 'count',
            'Avg_Daily_Usage_Hours': 'mean',
            'Mental_Health_Score': 'mean',
            'Addicted_Score': 'mean',
            'Sleep_Hours_Per_Night': 'mean',
            'Conflicts_Over_Social_Media': 'mean'
        }).round(2)

        platform_stats.columns = ['用户数量', '平均使用时长', '平均心理健康', '平均成瘾评分', '平均睡眠时间', '平均冲突次数']

        # Share of students per platform reporting an academic impact (%).
        academic_impact = self.df.groupby('Most_Used_Platform')['Affects_Academic_Performance'].apply(
            lambda x: (x == 'Yes').mean() * 100
        ).round(1)
        platform_stats['学术影响比例(%)'] = academic_impact

        self.analysis_results['platform_analysis'] = platform_stats.to_dict()

        print("✅ 平台分析完成")
        return platform_stats

    def country_analysis(self):
        """Aggregate the same metrics for the ten countries with most samples.

        Returns:
            DataFrame indexed by country with Chinese-labelled stat columns.
        """
        print("🌍 进行国家对比分析...")

        # Restrict to countries with enough samples for meaningful means.
        top_countries = self.df['Country'].value_counts().head(10).index
        country_data = self.df[self.df['Country'].isin(top_countries)]

        country_stats = country_data.groupby('Country').agg({
            'Student_ID': 'count',
            'Avg_Daily_Usage_Hours': 'mean',
            'Mental_Health_Score': 'mean',
            'Addicted_Score': 'mean',
            'Sleep_Hours_Per_Night': 'mean'
        }).round(2)

        country_stats.columns = ['样本数量', '平均使用时长', '平均心理健康', '平均成瘾评分', '平均睡眠时间']

        self.analysis_results['country_analysis'] = country_stats.to_dict()

        print("✅ 国家分析完成")
        return country_stats

    def machine_learning_analysis(self):
        """Train four classifiers predicting academic impact and record
        accuracy/AUC plus random-forest feature importances.

        Returns:
            dict mapping model name to its evaluation metrics.
        """
        print("🤖 进行机器学习分析...")

        # Features and binary target (encoded 'Affects_Academic_Performance').
        feature_cols = ['Age', 'Gender_encoded', 'Academic_Level_encoded', 'Country_encoded',
                       'Avg_Daily_Usage_Hours', 'Most_Used_Platform_encoded', 'Sleep_Hours_Per_Night',
                       'Mental_Health_Score', 'Relationship_Status_encoded', 'Conflicts_Over_Social_Media',
                       'Addicted_Score']

        X = self.processed_df[feature_cols]
        y = self.processed_df['Affects_Academic_Performance_encoded']

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Scaled copies for the scale-sensitive models (LR / SVC).
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)

        models = {
            'RandomForest': RandomForestClassifier(n_estimators=100, random_state=42),
            'GradientBoosting': GradientBoostingClassifier(random_state=42),
            'LogisticRegression': LogisticRegression(random_state=42),
            'SVC': SVC(probability=True, random_state=42)
        }

        ml_results = {}

        for model_name, model in models.items():
            print(f"  训练 {model_name}...")

            # Tree ensembles are scale-invariant; LR/SVC need scaled inputs.
            if model_name in ['LogisticRegression', 'SVC']:
                model.fit(X_train_scaled, y_train)
                y_pred = model.predict(X_test_scaled)
                y_prob = model.predict_proba(X_test_scaled)[:, 1]
            else:
                model.fit(X_train, y_train)
                y_pred = model.predict(X_test)
                y_prob = model.predict_proba(X_test)[:, 1]

            accuracy = accuracy_score(y_test, y_pred)
            auc = roc_auc_score(y_test, y_prob)

            ml_results[model_name] = {
                'accuracy': float(accuracy),
                'auc': float(auc),
                'classification_report': classification_report(y_test, y_pred, output_dict=True)
            }

            self.models[model_name] = model

            print(f"    {model_name} - 准确率: {accuracy:.3f}, AUC: {auc:.3f}")

        # Feature importances from the fitted random forest.
        rf_model = models['RandomForest']
        feature_importance = pd.DataFrame({
            'feature': feature_cols,
            'importance': rf_model.feature_importances_
        }).sort_values('importance', ascending=False)

        self.feature_importance = feature_importance.to_dict('records')
        self.analysis_results['machine_learning'] = ml_results
        self.analysis_results['feature_importance'] = self.feature_importance

        print("✅ 机器学习分析完成")
        return ml_results

    def deep_learning_analysis(self):
        """Train a small dense network on the same features; no-op returning
        None when TensorFlow is not installed.

        Returns:
            dict with test metrics and training history, or None.
        """
        if not TENSORFLOW_AVAILABLE:
            print("⚠️ 跳过深度学习分析（TensorFlow未安装）")
            return None

        print("🧠 进行深度学习分析...")

        # Same feature set and split as machine_learning_analysis, so the
        # results are directly comparable.
        feature_cols = ['Age', 'Gender_encoded', 'Academic_Level_encoded', 'Country_encoded',
                       'Avg_Daily_Usage_Hours', 'Most_Used_Platform_encoded', 'Sleep_Hours_Per_Night',
                       'Mental_Health_Score', 'Relationship_Status_encoded', 'Conflicts_Over_Social_Media',
                       'Addicted_Score']

        X = self.processed_df[feature_cols]
        y = self.processed_df['Affects_Academic_Performance_encoded']

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Re-fitting the shared scaler on the identical split reproduces the
        # same transform as in machine_learning_analysis.
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)

        # Three shrinking hidden layers with batch-norm/dropout regularisation,
        # sigmoid output for the binary target.
        model = Sequential([
            Dense(128, activation='relu', input_shape=(X_train_scaled.shape[1],)),
            BatchNormalization(),
            Dropout(0.3),
            Dense(64, activation='relu'),
            BatchNormalization(),
            Dropout(0.3),
            Dense(32, activation='relu'),
            Dropout(0.2),
            Dense(1, activation='sigmoid')
        ])

        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

        # Stop when validation loss plateaus and keep the best weights.
        early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

        history = model.fit(
            X_train_scaled, y_train,
            validation_split=0.2,
            epochs=100,
            batch_size=32,
            callbacks=[early_stopping],
            verbose=0
        )

        test_loss, test_accuracy = model.evaluate(X_test_scaled, y_test, verbose=0)
        y_pred_prob = model.predict(X_test_scaled, verbose=0)
        y_pred = (y_pred_prob > 0.5).astype(int)

        auc = roc_auc_score(y_test, y_pred_prob)

        dl_results = {
            'test_accuracy': float(test_accuracy),
            'test_loss': float(test_loss),
            'auc': float(auc),
            'training_history': {
                'loss': [float(x) for x in history.history['loss']],
                'accuracy': [float(x) for x in history.history['accuracy']],
                'val_loss': [float(x) for x in history.history['val_loss']],
                'val_accuracy': [float(x) for x in history.history['val_accuracy']]
            }
        }

        model.save('static/models/deep_learning_model.h5')

        self.analysis_results['deep_learning'] = dl_results

        print(f"✅ 深度学习分析完成 - 准确率: {test_accuracy:.3f}, AUC: {auc:.3f}")
        return dl_results

    def clustering_analysis(self):
        """Run K-means (k=4) on standardised behaviour features and profile
        each cluster.

        Returns:
            DataFrame of per-cluster means (Chinese-labelled columns).
        """
        print("🎯 进行聚类分析...")

        cluster_features = ['Age', 'Avg_Daily_Usage_Hours', 'Mental_Health_Score',
                           'Addicted_Score', 'Sleep_Hours_Per_Night', 'Conflicts_Over_Social_Media']

        X_cluster = self.df[cluster_features]
        X_cluster_scaled = StandardScaler().fit_transform(X_cluster)

        kmeans = KMeans(n_clusters=4, random_state=42)
        cluster_labels = kmeans.fit_predict(X_cluster_scaled)

        cluster_df = self.df.copy()
        cluster_df['Cluster'] = cluster_labels

        cluster_analysis = cluster_df.groupby('Cluster').agg({
            'Age': 'mean',
            'Avg_Daily_Usage_Hours': 'mean',
            'Mental_Health_Score': 'mean',
            'Addicted_Score': 'mean',
            'Sleep_Hours_Per_Night': 'mean',
            'Conflicts_Over_Social_Media': 'mean',
            'Student_ID': 'count'
        }).round(2)

        cluster_analysis.columns = ['平均年龄', '平均使用时长', '平均心理健康', '平均成瘾评分',
                                   '平均睡眠时间', '平均冲突次数', '人数']

        # NOTE(review): KMeans label numbering is arbitrary, so this fixed
        # id -> description mapping is a heuristic; verify against the
        # per-cluster stats before publishing the report.
        cluster_descriptions = {
            0: "均衡型用户",
            1: "重度使用者",
            2: "轻度使用者",
            3: "问题用户"
        }

        self.analysis_results['clustering'] = {
            'cluster_stats': cluster_analysis.to_dict(),
            'cluster_descriptions': cluster_descriptions
        }

        print("✅ 聚类分析完成")
        return cluster_analysis

    def create_advanced_visualizations(self):
        """Write the feature-importance, country-comparison and clustering
        charts as PNGs under static/charts/."""
        print("📊 创建高级可视化图表...")

        # 1. Feature-importance bar chart.
        if self.feature_importance:
            plt.figure(figsize=(10, 8))
            feature_df = pd.DataFrame(self.feature_importance)
            plt.barh(feature_df['feature'], feature_df['importance'])
            plt.title('特征重要性分析', fontsize=16, fontweight='bold')
            plt.xlabel('重要性评分')
            plt.tight_layout()
            plt.savefig('static/charts/feature_importance.png', dpi=300, bbox_inches='tight')
            plt.close()

        # 2. Country comparison (2x2 grid of bar charts).
        if 'country_analysis' in self.analysis_results:
            # to_dict() stored {stat_column: {country: value}}, which the
            # DataFrame constructor already rebuilds with countries on the
            # index; the original extra .T put stat names on the index and
            # made the column selections below raise KeyError.
            country_stats = pd.DataFrame(self.analysis_results['country_analysis'])

            fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

            country_stats['平均使用时长'].plot(kind='bar', ax=ax1, color='skyblue')
            ax1.set_title('各国平均使用时长对比')
            ax1.set_ylabel('小时')
            ax1.tick_params(axis='x', rotation=45)

            country_stats['平均心理健康'].plot(kind='bar', ax=ax2, color='lightgreen')
            ax2.set_title('各国平均心理健康评分对比')
            ax2.set_ylabel('评分')
            ax2.tick_params(axis='x', rotation=45)

            country_stats['平均成瘾评分'].plot(kind='bar', ax=ax3, color='lightcoral')
            ax3.set_title('各国平均成瘾评分对比')
            ax3.set_ylabel('评分')
            ax3.tick_params(axis='x', rotation=45)

            country_stats['平均睡眠时间'].plot(kind='bar', ax=ax4, color='lightyellow')
            ax4.set_title('各国平均睡眠时间对比')
            ax4.set_ylabel('小时')
            ax4.tick_params(axis='x', rotation=45)

            plt.tight_layout()
            plt.savefig('static/charts/country_comparison.png', dpi=300, bbox_inches='tight')
            plt.close()

        # 3. Cluster scatter plot (PCA projection).
        if 'clustering' in self.analysis_results:
            # Use the SAME feature set as clustering_analysis(); the original
            # re-clustered on only 3 features, so the plotted labels did not
            # correspond to the clusters described in the report. With the
            # identical data, parameters and random_state, this re-fit is
            # deterministic and reproduces the same labels.
            cluster_features = ['Age', 'Avg_Daily_Usage_Hours', 'Mental_Health_Score',
                               'Addicted_Score', 'Sleep_Hours_Per_Night', 'Conflicts_Over_Social_Media']
            X_cluster = self.df[cluster_features]
            X_cluster_scaled = StandardScaler().fit_transform(X_cluster)

            # Project to 2D for plotting only.
            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X_cluster_scaled)

            kmeans = KMeans(n_clusters=4, random_state=42)
            cluster_labels = kmeans.fit_predict(X_cluster_scaled)

            plt.figure(figsize=(10, 8))
            scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=cluster_labels, cmap='viridis', alpha=0.6)
            plt.colorbar(scatter)
            plt.title('用户聚类可视化 (PCA降维)', fontsize=16, fontweight='bold')
            plt.xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            plt.ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')
            plt.tight_layout()
            plt.savefig('static/charts/clustering_visualization.png', dpi=300, bbox_inches='tight')
            plt.close()

        print("✅ 高级可视化图表创建完成")

    def generate_comprehensive_report(self):
        """Assemble the Markdown report from analysis_results and save it to
        reports/comprehensive_analysis_report.md.

        Returns:
            The full report text.
        """
        print("📝 生成综合分析报告...")

        report_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        report = f"""
# 学生社交媒体与人际关系综合分析报告

**分析时间:** {report_time}
**分析样本:** {self.analysis_results['data_info']['total_samples']:,} 个学生

## 一、数据概览

### 基本信息
- 总样本数: {self.analysis_results['data_info']['total_samples']:,}
- 总特征数: {self.analysis_results['data_info']['total_features']}
- 数据质量: 无缺失值

### 关键发现
- 平均每日使用时间: {self.df['Avg_Daily_Usage_Hours'].mean():.1f} 小时
- 平均心理健康评分: {self.df['Mental_Health_Score'].mean():.1f}/10
- 平均成瘾评分: {self.df['Addicted_Score'].mean():.1f}/10
- 认为影响学术表现的学生比例: {(self.df['Affects_Academic_Performance'] == 'Yes').mean()*100:.1f}%

## 二、平台分析
"""

        if 'platform_analysis' in self.analysis_results:
            # to_dict() stored {stat_column: {platform: value}}; DataFrame()
            # already rebuilds that with platforms as the index. The original
            # .T flipped it, making the .loc lookups below raise KeyError.
            platform_stats = pd.DataFrame(self.analysis_results['platform_analysis'])
            for platform in platform_stats.index:
                report += f"""
### {platform}
- 用户数量: {platform_stats.loc[platform, '用户数量']:,.0f}
- 平均使用时长: {platform_stats.loc[platform, '平均使用时长']:.2f} 小时
- 平均心理健康: {platform_stats.loc[platform, '平均心理健康']:.2f}/10
- 平均成瘾评分: {platform_stats.loc[platform, '平均成瘾评分']:.2f}/10
- 学术影响比例: {platform_stats.loc[platform, '学术影响比例(%)']:.1f}%
"""

        report += """
## 三、机器学习分析结果
"""

        if 'machine_learning' in self.analysis_results:
            ml_results = self.analysis_results['machine_learning']
            for model_name, results in ml_results.items():
                report += f"""
### {model_name}
- 准确率: {results['accuracy']:.3f}
- AUC: {results['auc']:.3f}
"""

        if 'deep_learning' in self.analysis_results:
            dl_results = self.analysis_results['deep_learning']
            report += f"""
### 深度学习模型
- 准确率: {dl_results['test_accuracy']:.3f}
- AUC: {dl_results['auc']:.3f}
- 损失: {dl_results['test_loss']:.3f}
"""

        report += """
## 四、聚类分析
"""

        if 'clustering' in self.analysis_results:
            # Same orientation fix as above: no transpose, so the index is
            # the cluster ids and the description lookup works.
            cluster_stats = pd.DataFrame(self.analysis_results['clustering']['cluster_stats'])
            descriptions = self.analysis_results['clustering']['cluster_descriptions']

            for cluster_id in cluster_stats.index:
                desc = descriptions.get(cluster_id, f"聚类{cluster_id}")
                report += f"""
### 聚类 {cluster_id} - {desc}
- 用户数量: {cluster_stats.loc[cluster_id, '人数']:,.0f}
- 平均年龄: {cluster_stats.loc[cluster_id, '平均年龄']:.1f} 岁
- 平均使用时长: {cluster_stats.loc[cluster_id, '平均使用时长']:.2f} 小时
- 平均心理健康: {cluster_stats.loc[cluster_id, '平均心理健康']:.2f}/10
- 平均成瘾评分: {cluster_stats.loc[cluster_id, '平均成瘾评分']:.2f}/10
"""

        report += """
## 五、重要结论与建议

### 关键洞察
1. **使用时间影响**: 长时间使用社交媒体与学术表现下降存在显著关联
2. **平台差异**: 不同平台的成瘾风险和心理健康影响存在差异
3. **个体差异**: 通过聚类分析发现了不同类型的用户群体

### 政策建议
1. **教育机构**: 制定数字素养教育课程，提高学生自我管理能力
2. **技术平台**: 开发更好的时间管理工具和健康提醒功能
3. **个人层面**: 建立健康的社交媒体使用习惯，定期进行数字排毒

### 研究局限性
1. 数据基于自我报告，可能存在主观偏差
2. 横截面数据无法确立因果关系
3. 需要更多纵向数据来验证趋势

---
*本报告基于机器学习和深度学习方法进行分析，结果仅供研究参考*
"""

        with open('reports/comprehensive_analysis_report.md', 'w', encoding='utf-8') as f:
            f.write(report)

        print("✅ 综合分析报告生成完成")
        return report

    @classmethod
    def _to_jsonable(cls, obj):
        """Recursively convert numpy scalars/arrays (values AND dict keys)
        to plain Python types so json.dump succeeds.

        The raw analysis_results contain e.g. np.int64 values from
        isnull().sum().to_dict() and numpy-integer dict keys from the
        cluster groupby, none of which the stdlib json encoder accepts.
        """
        if isinstance(obj, dict):
            return {
                int(k) if isinstance(k, np.integer) else k: cls._to_jsonable(v)
                for k, v in obj.items()
            }
        if isinstance(obj, (list, tuple)):
            return [cls._to_jsonable(v) for v in obj]
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return obj

    def run_full_analysis(self):
        """Run every analysis stage in order and persist all artefacts.

        Returns:
            The accumulated analysis_results dict.
        """
        print("🚀 开始运行学生社交媒体数据增强分析...")
        print("=" * 60)

        # 1. Load and preprocess.
        self.load_data()
        self.preprocess_data()

        # 2. Exploratory statistics.
        self.exploratory_analysis()

        # 3. Dimension-specific comparisons.
        self.platform_analysis()
        self.country_analysis()

        # 4. Classic machine learning.
        self.machine_learning_analysis()

        # 5. Deep learning (skipped internally when TF is unavailable).
        self.deep_learning_analysis()

        # 6. Clustering.
        self.clustering_analysis()

        # 7. Charts.
        self.create_advanced_visualizations()

        # 8. Markdown report.
        self.generate_comprehensive_report()

        # 9. JSON dump — sanitise numpy types first, otherwise json.dump
        # raises TypeError on np.int64 values/keys.
        with open('enhanced_analysis_results.json', 'w', encoding='utf-8') as f:
            json.dump(self._to_jsonable(self.analysis_results), f, ensure_ascii=False, indent=2)

        print("\n" + "=" * 60)
        print("🎉 增强分析完成！")
        print(f"📊 分析结果已保存到: enhanced_analysis_results.json")
        print(f"📝 分析报告已保存到: reports/comprehensive_analysis_report.md")
        print(f"📈 可视化图表已保存到: static/charts/")

        return self.analysis_results

# Script entry point: build the analyzer against the bundled dataset
# and execute every analysis stage in sequence.
if __name__ == "__main__":
    dataset_path = '学生社交媒体与人际关系数据集/学生社交媒体与人际关系数据集.csv'
    results = EnhancedSocialMediaAnalyzer(dataset_path).run_full_analysis()
