import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, silhouette_score
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
from SimpleDataLoader import SimplifiedFeatureExtractor
from MultiModuleFeatureExtractor import MultiModuleFeatureExtractor
import pandas as pd
import time  

# Fix the global NumPy RNG seed so runs are reproducible.
np.random.seed(42)

# 1. Data loading and preprocessing
def load_data():
    """Load the OULAD CSV files, build feature splits, and add one engineered column.

    Reads the six anonymised CSVs, feeds them to MultiModuleFeatureExtractor
    (modules AAA/BBB/CCC, cached under ./feature_cache) and obtains the
    train/val/test split. Then appends an 'effective_study' composite feature
    to each X frame.

    Returns:
        (X_train, X_val, X_test, y_train, y_val, y_test) as produced by
        prepare_combined_train_val_test_split, with the extra column added.
    """
    student_info = pd.read_csv('./anonymisedData/anonymisedData/studentInfo.csv')
    student_vle = pd.read_csv('./anonymisedData/anonymisedData/studentVle.csv')
    student_assessment = pd.read_csv('./anonymisedData/anonymisedData/studentAssessment.csv')
    student_registration = pd.read_csv('./anonymisedData/anonymisedData/studentRegistration.csv')
    assessments = pd.read_csv('./anonymisedData/anonymisedData/assessments.csv')
    vle = pd.read_csv('./anonymisedData/anonymisedData/vle.csv')

    # Feature extraction over all three modules (project-local extractor).
    extractor = MultiModuleFeatureExtractor(student_info, student_vle, student_assessment, student_registration, vle, assessments, modules=['AAA', 'BBB', 'CCC'], cache_dir='./feature_cache')
    X_train, X_val, X_test, y_train, y_val, y_test = extractor.prepare_combined_train_val_test_split(test_size=0.2, val_size=0.1, random_state=42)

    # Composite feature engineering.
    def safe_division(a, b, default=0):
        """Element-wise a / b; rows where b == 0 get `default` instead.

        Uses np.divide with a `where` mask so zero denominators are never
        divided at all. (The previous `np.where(b != 0, a / (b + 1e-6), ...)`
        both biased every quotient by the epsilon and still evaluated the
        division for b == 0 because np.where evaluates both branches.)
        """
        a = np.asarray(a, dtype=float)
        b = np.asarray(b, dtype=float)
        out = np.full(b.shape, default, dtype=float)
        np.divide(a, b, out=out, where=(b != 0))
        return out

    # Shift offset is computed on the training split only (avoids leaking
    # val/test statistics) and applied identically to all three splits.
    min_val = X_train['days_to_first_login'].min()
    abs_min = abs(min_val) if min_val < 0 else 1

    for df in [X_train, X_val, X_test]:
        df['effective_study'] = safe_division(df['core_content_clicks'] * df['login_days'], df['days_to_first_login'] + abs_min, default=df['core_content_clicks'].median())
    return X_train, X_val, X_test, y_train, y_val, y_test

# Choose the best K via silhouette score
def find_optimal_k(X_scaled, max_k=10):
    """Return the cluster count in [2, max_k] with the highest silhouette score.

    Fits KMeans for each candidate k on X_scaled and scores the resulting
    labels. max_k is clamped to n_samples - 1, the largest k for which the
    silhouette score is defined (it requires 2 <= n_labels <= n_samples - 1).

    Args:
        X_scaled: standardized feature matrix, shape (n_samples, n_features).
        max_k: largest cluster count to try, inclusive.

    Returns:
        int: the best k (always >= 2).

    Raises:
        ValueError: if X_scaled has fewer than 3 samples, so no valid k exists.
    """
    n_samples = X_scaled.shape[0]
    # Clamp so silhouette_score/KMeans cannot raise on tiny inputs.
    max_k = min(max_k, n_samples - 1)
    if max_k < 2:
        raise ValueError("find_optimal_k needs at least 3 samples to evaluate k >= 2")
    silhouette_scores = []
    for k in range(2, max_k + 1):
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        labels = kmeans.fit_predict(X_scaled)
        silhouette_scores.append(silhouette_score(X_scaled, labels))
    # Index 0 corresponds to k == 2, hence the +2 offset.
    best_k = int(np.argmax(silhouette_scores)) + 2
    return best_k

# Feature importance analysis
def analyze_feature_importance(kmeans, scaler, feature_names, cluster_to_label):
    """Rank features by how far the high-risk cluster center(s) deviate from
    the global mean, in original (unscaled) units, and save a top-10 bar chart.

    Args:
        kmeans: fitted KMeans model (cluster_centers_ in standardized space).
        scaler: the StandardScaler used on the features, for inverse transform.
        feature_names: feature column names, aligned with the centers.
        cluster_to_label: mapping cluster id -> class label (1 = high risk).

    Returns:
        DataFrame with columns 'Feature', 'Coefficient', 'Abs_Importance',
        sorted by absolute importance descending. Side effect: writes
        'Kmeans_feature_importance.png' to the working directory.
    """
    n_features = len(feature_names)

    # Centers of every cluster voted high-risk (label == 1).
    risk_centers = [kmeans.cluster_centers_[cid]
                    for cid, label in cluster_to_label.items() if label == 1]
    if risk_centers:
        risk_center_scaled = np.mean(risk_centers, axis=0)
    else:
        # No high-risk cluster: deviations below come out as all zeros.
        risk_center_scaled = np.zeros(n_features)

    # The zero vector in standardized space inverts to the global feature means.
    global_mean = scaler.inverse_transform(np.zeros((1, n_features)))
    risk_center_orig = scaler.inverse_transform(risk_center_scaled.reshape(1, -1))
    # Importance = deviation of the high-risk center from the global mean.
    risk_diff = risk_center_orig - global_mean
    importance = pd.DataFrame({
        'Feature': feature_names,
        'Coefficient': risk_diff.flatten(),
        'Abs_Importance': np.abs(risk_diff).flatten(),
    }).sort_values('Abs_Importance', ascending=False)

    # Visualization of the ten strongest deviations.
    top_rows = importance.head(10)
    plt.figure(figsize=(10, 6))
    bar_colors = ['red' if coef > 0 else 'green' for coef in top_rows['Coefficient']]
    bars = plt.barh(top_rows['Feature'], top_rows['Coefficient'], color=bar_colors)
    # Value label centered inside each bar.
    for bar in bars:
        width = bar.get_width()
        plt.text(width / 2, bar.get_y() + bar.get_height() / 2,
                 f'{width:.3f}', va='center', ha='center')

    plt.axvline(x=0, color='gray', linestyle='--')
    plt.title('Feature Importance for High-Risk Cluster (Red=High-Risk, Green=Low-Risk)')
    plt.xlabel('Coefficient value')
    plt.ylabel('Feature name')

    plt.tight_layout()
    plt.savefig('Kmeans_feature_importance.png', dpi=120, bbox_inches='tight')
    plt.close()

    return importance


def train_and_evaluate():
    """Run the full pipeline: load features, standardize, pick K by silhouette
    score, fit KMeans, map clusters to class labels by training-set majority
    vote, then report validation/test metrics and feature importance."""
    # 1. Load the train/val/test feature splits.
    X_train, X_val, X_test, y_train, y_val, y_test = load_data()
    print(f"训练集: {X_train.shape[0]}样本 | 验证集: {X_val.shape[0]} | 测试集: {X_test.shape[0]}")
    # 2. Standardize features; the scaler is fitted on the training set only.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_val_scaled = scaler.transform(X_val)
    X_test_scaled = scaler.transform(X_test)
    # 3. Choose the best K on the training set (timed together with the fit).
    train_start = time.time()
    best_k = find_optimal_k(X_train_scaled)
    print(f"\n最佳聚类数量: {best_k}")
    
    # 4. Train the final KMeans model.
    kmeans = KMeans(n_clusters=best_k, random_state=42, n_init=10,init="k-means++", max_iter=300)
    kmeans.fit(X_train_scaled)
    train_time = time.time() - train_start
    print(f"模型训练耗时: {train_time:.2f}秒")
    
    # 5. Assign a class label to each cluster by majority vote over y_train.
    # NOTE(review): assumes every fitted cluster holds at least one training
    # sample and y_train contains non-negative ints — np.bincount/argmax
    # would fail on an empty cluster or negative labels; confirm upstream.
    cluster_to_label = {}
    for cluster in range(best_k):
        majority_label = np.bincount(y_train[kmeans.labels_ == cluster]).argmax()
        cluster_to_label[cluster] = majority_label
    # 6. Evaluate on the validation set.
    val_clusters = kmeans.predict(X_val_scaled)
    val_pred = np.array([cluster_to_label[c] for c in val_clusters])
    print("\n验证集评估:")
    print(classification_report(y_val, val_pred))
    # 7. Final evaluation on the held-out test set.
    test_clusters = kmeans.predict(X_test_scaled)
    test_pred = np.array([cluster_to_label[c] for c in test_clusters])
    
    print("\n测试集评估:")
    print(f"准确率: {accuracy_score(y_test, test_pred):.4f}")
    print("\n混淆矩阵:")
    print(confusion_matrix(y_test, test_pred))
    print("\n分类报告:")
    print(classification_report(y_test, test_pred))
    
    # 8. Feature importance analysis (also saves a bar chart to disk).
    feature_importance = analyze_feature_importance(kmeans, scaler, X_train.columns, cluster_to_label)
    print("\n特征重要性:")
    print(feature_importance.head(10))
    
# Script entry point: run the full clustering pipeline.
if __name__ == "__main__":
    train_and_evaluate()