import pandas as pd
import matplotlib.pyplot as plt
import time  # 新增时间计算模块
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_auc_score
from MultiModuleFeatureExtractor import MultiModuleFeatureExtractor
import numpy as np

# 加载数据进行预处理
def load_data():
    # 1. 数据加载
    student_info = pd.read_csv('./anonymisedData/anonymisedData/studentInfo.csv')
    student_vle = pd.read_csv('./anonymisedData/anonymisedData/studentVle.csv')
    student_assessment = pd.read_csv('./anonymisedData/anonymisedData/studentAssessment.csv')
    student_registration = pd.read_csv('./anonymisedData/anonymisedData/studentRegistration.csv')
    assessments = pd.read_csv('./anonymisedData/anonymisedData/assessments.csv')
    vle = pd.read_csv('./anonymisedData/anonymisedData/vle.csv')

    # 2. 特征提取
    # # 初始化特征提取器（
    # feature_extractor = SimplifiedFeatureExtractor(
    #     student_info, student_vle, student_assessment,
    #     student_registration, vle, assessments, target_module='CCC'  # 选择模块CCC
    # )

    # # 获取数据集（70%训练，15%验证，15%测试）
    # X_train, X_val, X_test, y_train, y_val, y_test = feature_extractor.prepare_train_val_test_split(
    #     test_size=0.2,  # 30%用于测试+验证
    #     val_size=0.1,   # 验证集占30%中的50%（即总15%）
    # )
    extractor = MultiModuleFeatureExtractor(student_info, student_vle, student_assessment, student_registration, vle, assessments,modules=['AAA', 'BBB', 'CCC'],cache_dir='./feature_cache')
    # 3. 数据分割
    return extractor.prepare_combined_train_val_test_split(test_size=0.2,val_size=0.1,random_state=42)

# 训练逻辑回归模型
# Train a logistic-regression classifier and time the fit.
def train_model(X_train, y_train):
    """Fit an L2-regularized, class-weight-balanced logistic regression.

    Args:
        X_train: training feature matrix (already standardized by the caller).
        y_train: binary training labels.

    Returns:
        Tuple ``(model, training_time)``: the fitted estimator and the
        wall-clock fit duration in seconds.
    """
    model = LogisticRegression(
        penalty='l2',
        C=1.0,
        solver='lbfgs',
        max_iter=1000,
        class_weight='balanced',  # compensate for class imbalance
    )
    # perf_counter is a monotonic clock -- unlike time.time() it cannot go
    # backwards if the system clock is adjusted mid-fit.
    start_time = time.perf_counter()
    model.fit(X_train, y_train)
    training_time = time.perf_counter() - start_time
    return model, training_time

# 评估模型性能
def evaluate_model(model, X, y, set_name):
    pred = model.predict(X)
    prob = model.predict_proba(X)[:, 1]
    print(f"\n{set_name}集性能:")
    print(classification_report(y, pred))
    print(f"AUC: {roc_auc_score(y, prob):.4f}")
    return pred, prob

# 分析特征重要性
def analyze_features(model, feature_names):
    # 根据线性模型的权重系数，创建特征重要性DataFrame
    importance = pd.DataFrame({'Feature': feature_names,'Coefficient': model.coef_[0],'Abs_Importance': abs(model.coef_[0])}).sort_values('Abs_Importance', ascending=False)
    print("\nTop 10 features:")
    print(importance.head(10))
    
    # 可视化
    plt.figure(figsize=(10, 6))
    colors = ['red' if x > 0 else 'green' for x in importance.head(10)['Coefficient']]
    bars = plt.barh(importance.head(10)['Feature'],importance.head(10)['Coefficient'],color=colors)
    
    # 添加标签
    for bar in bars:
        width = bar.get_width()
        plt.text(width/2,bar.get_y() + bar.get_height()/2,f'{width:.3f}',va='center',ha='center')
    # 标注
    plt.axvline(x=0, color='gray', linestyle='--')
    plt.title('Feature Importance (Red=Positive, Green=Negative)')
    plt.xlabel('Coefficient value')
    plt.ylabel('Feature name')
    plt.tight_layout()
    plt.savefig('feature_importance.png', dpi=120, bbox_inches='tight')
    plt.close()

    return importance

def safe_division(a, b, default=0):
    """Element-wise ``a / b`` that maps inf/NaN results to `default`.

    Handles division by zero and 0/0 without warnings. Unlike the previous
    in-place masking (``res[~np.isfinite(res)] = default``), ``np.where``
    also works when the inputs are scalars, where the result is a 0-d
    array that does not support boolean-mask assignment.

    Args:
        a: numerator (scalar or array-like).
        b: denominator (scalar or array-like).
        default: value substituted wherever the quotient is not finite.

    Returns:
        Array (or 0-d array for scalar inputs) of quotients with all
        non-finite entries replaced by `default`.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        res = np.divide(a, b)
        res = np.where(np.isfinite(res), res, default)
    return res

def add_composite_features(X_train, X_val, X_test):
    """Add engineered features to all three splits, in place.

    Creates two columns on each DataFrame:
      - ``effective_study``: (core clicks * login days) / shifted first-login
        delay, with the split's median click count as the fallback value.
      - ``study_efficiency``: core clicks / (|first-login delay| + 1).

    The shift amount is learned from the TRAINING split only, so no
    information leaks from validation/test into the transform.

    Returns:
        The same three DataFrames (mutated in place).
    """
    # 1. Smallest nonzero magnitude of the training first-login delay.
    #    Fall back to 1.0 if the column is entirely zeros, which would
    #    otherwise make .min() raise on an empty selection.
    nonzero = X_train['days_to_first_login'][X_train['days_to_first_login'] != 0]
    min_abs = np.abs(nonzero).min() if len(nonzero) else 1.0

    # 2. Shift values away from zero so the denominator can never be 0.
    def make_positive(b):
        return np.where(b >= 0, b + min_abs, b - min_abs)

    # 3. Add the composite features to every split.
    for df in (X_train, X_val, X_test):
        denominator = make_positive(df['days_to_first_login'])
        df['effective_study'] = safe_division(
            df['core_content_clicks'] * df['login_days'],
            denominator,
            default=df['core_content_clicks'].median(),
        )
        df['study_efficiency'] = safe_division(
            df['core_content_clicks'],
            df['days_to_first_login'].abs() + 1,
            default=0,
        )
    return X_train, X_val, X_test

def main():
    """Pipeline entry point: load data, train, evaluate, analyze features."""
    # Data preparation
    X_train, X_val, X_test, y_train, y_val, y_test = load_data()
    X_train, X_val, X_test = add_composite_features(X_train, X_val, X_test)

    # Report sample counts and positive-class rate per split
    print("\n数据分布:")
    splits = (
        ("训练", X_train, y_train),
        ("验证", X_val, y_val),
        ("测试", X_test, y_test),
    )
    for label, X_part, y_part in splits:
        print(f"{label}集: {X_part.shape[0]}样本 | 正样本: {y_part.mean():.1%}")

    # Standardize features (fit on train only to avoid leakage)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_val_scaled = scaler.transform(X_val)
    X_test_scaled = scaler.transform(X_test)

    # Model training (also returns the wall-clock fit time)
    model, train_time = train_model(X_train_scaled, y_train)
    print(f"模型训练完成，耗时: {train_time:.2f}秒")

    # Evaluation on the held-out splits
    evaluate_model(model, X_val_scaled, y_val, "验证")
    evaluate_model(model, X_test_scaled, y_test, "测试")

    # Coefficient-based feature analysis
    analyze_features(model, X_train.columns)

# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()