import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Circle, RegularPolygon
from matplotlib.path import Path
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
from matplotlib.spines import Spine
import matplotlib.transforms as mtransforms
import os

# Cap OpenMP at one thread to silence the known KMeans memory-leak warning
os.environ['OMP_NUM_THREADS'] = '1'

# Global plotting defaults: CJK-capable fonts, correct minus-sign rendering,
# and high-resolution figures.
plt.rcParams.update({
    "font.family": ["SimHei", "WenQuanYi Micro Hei", "Heiti TC", "Microsoft YaHei", "Arial Unicode MS"],
    "axes.unicode_minus": False,  # render the minus sign correctly with CJK fonts
    "figure.dpi": 300,            # figure resolution
})


class StudentClusteringAnalyzer:
    """K-means clustering analyzer for student learning data.

    Pipeline: load a CSV of per-student metrics, derive and min-max scale
    the features, pick the number of clusters via the silhouette score,
    run K-means, then save diagnostic plots and print per-cluster
    teaching suggestions.
    """

    def __init__(self, csv_file='student_data.csv', n_clusters=4):
        """Initialize the analyzer.

        Args:
            csv_file: Path to the input CSV. Expected to contain the
                Chinese columns used throughout: '平均分', '成绩标准差',
                '进步率', '作业完成率', '互动频率', '学习时长'.
            n_clusters: Requested cluster count. NOTE(review): the
                pipeline currently ignores this and uses the
                silhouette-selected ``best_k``; kept for API
                compatibility.
        """
        self.csv_file = csv_file
        self.n_clusters = n_clusters
        self.scaler = MinMaxScaler()
        self.kmeans = None           # fitted KMeans model
        self.data = None             # raw DataFrame loaded from the CSV
        self.processed_data = None   # scaled feature matrix used for clustering
        self.cluster_labels = None   # cluster id per student
        self.cluster_stats = None    # per-cluster mean of scaled features
        self.best_k = None           # K chosen by silhouette score

    def load_data(self):
        """Load the CSV into ``self.data``.

        Returns:
            The loaded DataFrame, or None if the file does not exist.
        """
        try:
            self.data = pd.read_csv(self.csv_file)
            print(f"已从 {self.csv_file} 加载数据")
            return self.data
        except FileNotFoundError:
            print(f"错误: 文件 {self.csv_file} 不存在！请先运行数据生成脚本。")
            return None

    def preprocess_data(self):
        """Derive stability/progress scores and min-max scale all features.

        Returns:
            DataFrame of scaled features, or None if data could not be loaded.
        """
        if self.data is None:
            self.load_data()

        if self.data is None:
            return None

        processed_df = self.data.copy()
        # Turn the score standard deviation into a "stability" score where
        # higher means more stable (assumes std dev <= 30 — TODO confirm
        # against the data-generation script).
        processed_df['成绩稳定性'] = 1 - (processed_df['成绩标准差'] / 30)
        # Map the progress rate from its assumed [-0.3, 0.5] range onto [0, 1].
        processed_df['进步率'] = (processed_df['进步率'] + 0.3) / 0.8

        # Min-max scale the selected features so they are comparable on [0, 1].
        features_to_scale = ['平均分', '成绩稳定性', '进步率', '作业完成率', '互动频率', '学习时长']
        self.processed_data = pd.DataFrame(
            self.scaler.fit_transform(processed_df[features_to_scale]),
            columns=features_to_scale
        )

        return self.processed_data

    def find_optimal_k(self, max_k=8):
        """Choose K using the elbow method (SSE) and the silhouette score.

        Saves the diagnostic plot to 'optimal_k_selection.png'.

        Args:
            max_k: Largest K to evaluate (candidates are 2..max_k).

        Returns:
            The best K (maximum silhouette score), or None if no data.
        """
        if self.processed_data is None:
            self.preprocess_data()

        if self.processed_data is None:
            return None

        sse = []                 # within-cluster sum of squared errors per K
        silhouette_scores = []   # silhouette score per K

        for k in range(2, max_k + 1):
            kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
            cluster_labels = kmeans.fit_predict(self.processed_data)

            sse.append(kmeans.inertia_)
            silhouette_scores.append(silhouette_score(self.processed_data, cluster_labels))

        # Highest silhouette score wins; candidates start at K=2, hence the
        # offset. Cast to a plain int so best_k is not a numpy scalar.
        self.best_k = int(np.argmax(silhouette_scores)) + 2

        # Plot both criteria side by side for manual inspection.
        plt.figure(figsize=(12, 5))

        plt.subplot(1, 2, 1)
        plt.plot(range(2, max_k + 1), sse, 'bo-')
        plt.xlabel('聚类数量 K')
        plt.ylabel('误差平方和 (SSE)')
        plt.title('肘部法确定最佳K值')

        plt.subplot(1, 2, 2)
        plt.plot(range(2, max_k + 1), silhouette_scores, 'ro-')
        plt.xlabel('聚类数量 K')
        plt.ylabel('轮廓系数')
        plt.title('轮廓系数评估聚类效果')

        plt.tight_layout()
        plt.savefig('optimal_k_selection.png', dpi=300)
        plt.close()

        print(f"最佳聚类数量 K = {self.best_k}")
        return self.best_k

    def perform_clustering(self, k=None):
        """Run K-means and attach labels/statistics to the data.

        Args:
            k: Number of clusters. Defaults to the silhouette-selected
               ``best_k`` (computed on demand).

        Returns:
            Tuple (cluster_labels, cluster_stats), or (None, None) if
            no data is available.
        """
        if self.processed_data is None:
            self.preprocess_data()

        if self.processed_data is None:
            return None, None

        if k is None:
            if self.best_k is None:
                self.find_optimal_k()
            k = self.best_k

        self.kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        self.cluster_labels = self.kmeans.fit_predict(self.processed_data)

        # Attach the labels to the raw data for downstream plots/reports.
        self.data['聚类标签'] = self.cluster_labels

        # Per-cluster mean of each scaled feature.
        self.cluster_stats = self.processed_data.copy()
        self.cluster_stats['聚类标签'] = self.cluster_labels
        self.cluster_stats = self.cluster_stats.groupby('聚类标签').mean().reset_index()

        return self.cluster_labels, self.cluster_stats

    def create_radar_chart(self, fig=None, position=111):
        """Draw a radar chart comparing the clusters' feature profiles.

        Args:
            fig: Figure to draw on; a new one is created when None.
            position: Subplot position spec passed to ``add_subplot``.

        Returns:
            Tuple (fig, ax), or (None, None) if clustering could not run.
        """
        if self.cluster_stats is None:
            self.perform_clustering()

        # Guard against perform_clustering() failing (e.g. missing CSV),
        # consistent with the other methods' None handling.
        if self.cluster_stats is None:
            return None, None

        features = self.cluster_stats.columns[1:]  # skip the label column
        n_features = len(features)
        n_clusters = len(self.cluster_stats)

        # Evenly spaced axis angles; repeat the first to close the polygon.
        angles = np.linspace(0, 2 * np.pi, n_features, endpoint=False).tolist()
        angles += angles[:1]

        # One distinct color per cluster. plt.cm.tab20 is the stable
        # accessor; plt.cm.get_cmap was deprecated and removed in mpl 3.9.
        cmap = plt.cm.tab20
        colors = [cmap(i % 20) for i in range(n_clusters)]
        cluster_names = [f'群体 {i + 1}' for i in range(n_clusters)]

        if fig is None:
            fig = plt.figure(figsize=(10, 8))

        ax = fig.add_subplot(position, polar=True)

        # One closed outline + translucent fill per cluster.
        for i, row in self.cluster_stats.iterrows():
            values = row.iloc[1:].tolist()  # explicit positional slice past the label
            values += values[:1]            # close the polygon
            ax.plot(angles, values, 'o-', linewidth=2, color=colors[i], label=cluster_names[i])
            ax.fill(angles, values, alpha=0.1, color=colors[i])

        # Feature names on the angular axis.
        ax.set_thetagrids(np.degrees(angles[:-1]), features)

        # Scaled features all live in [0, 1].
        ax.set_ylim(0, 1)

        ax.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))

        return fig, ax

    def visualize_clusters(self):
        """Save the full set of cluster visualizations as PNG files."""
        if self.cluster_stats is None:
            self.perform_clustering()

        if self.cluster_stats is None:
            return

        # 1. Radar chart: feature profile per cluster.
        plt.figure(figsize=(10, 8))
        self.create_radar_chart(plt.gcf(), 111)
        plt.title('各群体学习特征对比')
        plt.tight_layout()
        plt.savefig('cluster_features_radar.png', dpi=300)
        plt.close()

        # 2. Pie chart: cluster size distribution.
        plt.figure(figsize=(8, 8))
        cluster_counts = self.data['聚类标签'].value_counts().sort_index()

        # Stable colormap accessor (get_cmap removed in matplotlib 3.9).
        cmap = plt.cm.tab20
        colors = [cmap(i % 20) for i in range(len(cluster_counts))]

        plt.pie(
            cluster_counts,
            labels=[f'群体 {i + 1}' for i in cluster_counts.index],
            autopct='%1.1f%%',
            colors=colors,
            startangle=90
        )
        plt.axis('equal')
        plt.title('各群体学生占比')
        plt.tight_layout()
        plt.savefig('cluster_distribution_pie.png', dpi=300)
        plt.close()

        # 3. Box plots: raw feature distribution per cluster.
        plt.figure(figsize=(12, 8))

        features = ['平均分', '成绩标准差', '进步率', '作业完成率', '互动频率', '学习时长']
        n_features = len(features)

        for i, feature in enumerate(features, 1):
            plt.subplot(2, 3, i)
            sns.boxplot(x='聚类标签', y=feature, data=self.data)
            plt.title(f'{feature}分布')

        plt.tight_layout()
        plt.savefig('feature_distribution_boxplot.png', dpi=300)
        plt.close()

        # 4. Cluster-center comparison. Use the fitted model's own cluster
        # count: self.best_k is None when the caller passed an explicit k
        # to perform_clustering(), which previously raised TypeError here.
        plt.figure(figsize=(12, 8))
        cluster_centers = self.kmeans.cluster_centers_
        feature_names = self.processed_data.columns  # scaled feature names

        for i in range(self.kmeans.n_clusters):
            plt.plot(feature_names, cluster_centers[i], 'o-', label=f'群体 {i + 1}')

        plt.xticks(rotation=45, ha='right')
        plt.ylabel('标准化值')
        plt.title('各群体聚类中心对比')
        plt.legend()
        plt.tight_layout()
        plt.savefig('cluster_centers_comparison.png', dpi=300)
        plt.close()

        print("\n可视化结果已保存为以下图片文件：")
        print("1. optimal_k_selection.png - 最佳聚类数量选择")
        print("2. cluster_features_radar.png - 各群体学习特征雷达图")
        print("3. cluster_distribution_pie.png - 各群体学生占比饼图")
        print("4. feature_distribution_boxplot.png - 各特征在不同聚类中的分布")
        print("5. cluster_centers_comparison.png - 各群体聚类中心对比")

    def generate_teaching_suggestions(self):
        """Print a teaching-strategy suggestion for each cluster."""
        if self.cluster_stats is None:
            self.perform_clustering()

        if self.cluster_stats is None:
            return

        print("\n===== 教学策略建议 =====")

        # Label each cluster and pick a suggestion from its scaled-feature
        # profile. Thresholds are heuristic cut-offs on [0, 1] values.
        for i, row in self.cluster_stats.iterrows():
            if row['平均分'] > 0.7 and row['成绩稳定性'] > 0.7:
                group_type = "优秀稳定型"
                suggestion = (
                    "该群体学生基础扎实、学习习惯良好。建议提供拓展性学习资源，"
                    "鼓励担任学习小组组长，带动其他学生进步。"
                )
            elif row['进步率'] > 0.6 and row['互动频率'] > 0.6:
                group_type = "高互动进步型"
                suggestion = (
                    "该群体学生积极性高、进步明显。建议提供挑战性问题，"
                    "加强一对一指导，加速能力提升。"
                )
            elif row['成绩稳定性'] < 0.4 or row['学习时长'] < 0.4:
                group_type = "波动型"
                suggestion = (
                    "该群体学生成绩波动较大或学习投入不足。建议帮助制定规律学习计划，"
                    "培养良好学习习惯，定期跟踪学习状态。"
                )
            else:
                group_type = "待改进型"
                suggestion = (
                    "该群体学生当前表现有待提高。建议简化作业难度，"
                    "安排基础辅导小组，逐步提升学习信心。"
                )

            print(f"\n群体 {i + 1} ({group_type}):")
            # NOTE(review): '* 0.8 - 0.3' inverts only the preprocessing
            # remap of 进步率, not the subsequent min-max scaling, so the
            # printed value is approximate — confirm intended display.
            print(f"  特征: 平均分 {row['平均分']:.2%}, 进步率 {row['进步率'] * 0.8 - 0.3:.2%}, "
                  f"作业完成率 {row['作业完成率']:.2%}, 互动频率 {row['互动频率']:.2%}")
            print(f"  建议: {suggestion}")

    def analyze_and_report(self):
        """Run the full pipeline: load, preprocess, cluster, plot, report."""
        print("===== 学生学习情况聚类分析报告 =====")

        # Load the data; abort early if the CSV is missing.
        if self.load_data() is None:
            return

        # Preprocess.
        self.preprocess_data()
        print("数据预处理完成")

        # Select K.
        self.find_optimal_k()

        # Cluster.
        self.perform_clustering()
        print(f"已完成 {self.best_k} 类聚类分析")

        # Plots.
        self.visualize_clusters()

        # Suggestions.
        self.generate_teaching_suggestions()


# Script entry point: run the complete clustering analysis.
if __name__ == "__main__":
    StudentClusteringAnalyzer(csv_file='student_data.csv', n_clusters=4).analyze_and_report()