import pandas as pd
import numpy as np
import os
import pickle
from sklearn.model_selection import train_test_split
from tqdm import tqdm

class SimplifiedFeatureExtractor:
    """Feature extractor for a single course module.

    Extracts first-four-weeks (date <= 28) VLE-behaviour, assessment and
    demographic features per student from OULAD-style tables, and caches
    prepared training data as pickles under ``cache_dir``.
    """

    def __init__(self, student_info, student_vle, student_assessment, student_registration, vle, assessments,
                 target_module='CCC', cache_dir='./feature_cache'):
        """
        Args:
            student_info, student_vle, student_assessment,
            student_registration, vle, assessments: raw OULAD-style DataFrames.
            target_module: course module code to analyse (default 'CCC').
            cache_dir: directory where feature caches are written.
        """
        self.cache_dir = cache_dir
        self.target_module = target_module
        os.makedirs(self.cache_dir, exist_ok=True)

        print(f"正在初始化数据（加载{target_module}模块）...")
        with tqdm(total=6, desc="加载数据表") as pbar:
            self.student_info = student_info[student_info['code_module'] == target_module].copy()
            pbar.update(1)
            self.student_vle = student_vle[student_vle['code_module'] == target_module].copy()
            pbar.update(1)
            # student_assessment has no code_module column; module filtering
            # happens implicitly via the merge with `assessments` below.
            self.student_assessment = student_assessment.copy()
            pbar.update(1)
            self.student_registration = student_registration[
                student_registration['code_module'] == target_module].copy()
            pbar.update(1)
            self.vle = vle[vle['code_module'] == target_module].copy()
            pbar.update(1)
            self.assessments = assessments[assessments['code_module'] == target_module].copy()
            pbar.update(1)

        # Pre-merge lookup tables once.  The original code re-merged the full
        # tables inside every per-student feature call, which made feature
        # extraction quadratic in the number of records.
        self._build_merged_tables()

        print(f"初始化完成，{target_module}模块记录数: {len(self.student_info)}")

    def _build_merged_tables(self):
        """Build the merged VLE and assessment tables used by the extractors."""
        # Attach activity_type to every VLE interaction record.
        self._vle_merged = pd.merge(
            self.student_vle,
            self.vle[['id_site', 'code_module', 'code_presentation', 'activity_type']],
            on=['id_site', 'code_module', 'code_presentation'],
            how='left')
        # Attach assessment deadline ('date') plus module/presentation to each submission.
        self._assessments_merged = pd.merge(
            self.student_assessment,
            self.assessments[['id_assessment', 'code_module', 'code_presentation', 'date']],
            on='id_assessment',
            how='left')

    def _extract_vle_features(self, student_id, code_module, code_presentation):
        """Extract the 11 core VLE behaviour features from the first four weeks.

        Returns:
            dict mapping feature name -> numeric value.
        """
        merged = self._vle_merged
        vle_data = merged[
            (merged['id_student'] == student_id) &
            (merged['code_module'] == code_module) &
            (merged['code_presentation'] == code_presentation) &
            (merged['date'] <= 28)]
        features = {}

        # Basic activity features.
        features['login_days'] = vle_data['date'].nunique()
        features['total_clicks'] = vle_data['sum_click'].sum()
        features['forum_clicks'] = vle_data[vle_data['activity_type'] == 'forumng']['sum_click'].sum()
        features['core_content_clicks'] = vle_data[
            vle_data['activity_type'].isin(['oucontent', 'resource', 'url'])]['sum_click'].sum()
        features['quiz_clicks'] = vle_data[
            vle_data['activity_type'].isin(['quiz', 'externalquiz'])]['sum_click'].sum()
        # May be negative (activity before the official course start);
        # defaults to 28 when the student never logged in during the window.
        features['days_to_first_login'] = vle_data['date'].min() if not vle_data.empty else 28

        # Longest run of consecutive login days.  Bug fix: the original only
        # updated the maximum while a streak was being extended, so a student
        # whose login days were all isolated scored 0 instead of 1.
        login_dates = sorted(vle_data['date'].unique())
        current_streak = max_streak = 1 if login_dates else 0
        for prev_day, day in zip(login_dates, login_dates[1:]):
            current_streak = current_streak + 1 if day == prev_day + 1 else 1
            max_streak = max(max_streak, current_streak)
        features['max_login_streak'] = max_streak

        # Resource-type coverage, normalised by an assumed maximum of 10 types.
        features['activity_type_diversity'] = vle_data['activity_type'].nunique() / 10

        # Study-rhythm volatility: std-dev of daily click totals over the
        # 28-day window, padding missing days with zero clicks so sparse
        # histories are not rewarded with artificially low variance.
        daily_clicks = vle_data.groupby('date')['sum_click'].sum().values
        n_missing_days = max(0, 28 - len(daily_clicks))
        adjusted_clicks = np.concatenate([daily_clicks, np.zeros(n_missing_days)])
        features['click_std_dev'] = adjusted_clicks.std()

        # Concentration on core learning resources.
        total_clicks = features['total_clicks']
        features['core_resource_ratio'] = (
            features['core_content_clicks'] / total_clicks if total_clicks > 0 else 0)

        # Preview intensity: share of clicks made before the course start (date < 0).
        pre_course_clicks = vle_data[vle_data['date'] < 0]['sum_click'].sum()
        features['pre_course_click_ratio'] = (
            pre_course_clicks / total_clicks if total_clicks > 0 else 0)

        return features

    def _extract_assessment_features(self, student_id, code_module, code_presentation):
        """Extract assessment features for the first four weeks: mean score,
        number of submissions and mean submission delay."""
        merged = self._assessments_merged
        early_assessments = merged[
            (merged['id_student'] == student_id) &
            (merged['code_module'] == code_module) &
            (merged['code_presentation'] == code_presentation) &
            (merged['date_submitted'] <= 28)
        ].copy()

        features = {}
        features['avg_score'] = early_assessments['score'].mean() if not early_assessments.empty else 0
        features['n_submitted'] = early_assessments['score'].notnull().sum() if not early_assessments.empty else 0
        # NOTE(review): delay = deadline - submission day, so POSITIVE values
        # mean the submission was EARLY.  Kept as-is for compatibility, but
        # the sign convention behind the name 'avg_delay' should be confirmed.
        early_assessments['delay'] = early_assessments['date'] - early_assessments['date_submitted']
        features['avg_delay'] = early_assessments['delay'].mean() if not early_assessments.empty else 0
        return features

    def _extract_demographic_features(self, student_id):
        """Extract demographic features: studied credits, registration day,
        gender, disability, education level and deprivation indicator.

        Raises IndexError if the student is absent from student_info or
        student_registration (original behaviour, kept).
        """
        student_data = self.student_info[self.student_info['id_student'] == student_id].iloc[0]
        features = {
            'studied_credits': student_data['studied_credits'],
            'registration_days': self.student_registration[
                self.student_registration['id_student'] == student_id
            ]['date_registration'].iloc[0],
            'is_male': 1 if student_data['gender'] == 'M' else 0,
            'has_disability': 1 if student_data['disability'] == 'Y' else 0,
            'high_education': 1 if student_data['highest_education'] in ['HE Qualification', 'A Level'] else 0,
            # '10-20' (without '%') matches the OULAD dataset's inconsistent
            # band labels -- presumably intentional; verify against the data.
            'deprivation': 1 if student_data['imd_band'] in ['0-10%', '10-20'] else 0
        }
        return features

    def extract_features(self, student_id, code_module, code_presentation):
        """Extract the full feature vector (VLE + assessment + demographic)."""
        features = {}
        # VLE behaviour features (11).
        features.update(self._extract_vle_features(student_id, code_module, code_presentation))
        # Assessment features (3).
        features.update(self._extract_assessment_features(student_id, code_module, code_presentation))
        # Demographic features (6).
        features.update(self._extract_demographic_features(student_id))
        return features

    def _get_cache_filename(self, suffix):
        """Build a cache filename namespaced by the target module."""
        return os.path.join(
            self.cache_dir,
            f"{self.target_module}_features_{suffix}.pkl"
        )

    def prepare_training_data(self, use_cache=True):
        """Build the (X, y) training frame for every registered student.

        The label is 1 for Fail/Withdrawn, 0 otherwise.

        Args:
            use_cache: load/save the result from/to the pickle cache.
        """
        cache_suffix = "train_data"
        if use_cache:
            cached_data = self._load_features(cache_suffix)
            if cached_data is not None:
                return cached_data

        print("\n正在准备训练数据...")
        merged_data = pd.merge(
            self.student_info, self.student_registration,
            on=['code_module', 'code_presentation', 'id_student'], how='left')

        all_features = []
        labels = []

        for _, row in tqdm(merged_data.iterrows(), total=len(merged_data), desc="提取特征"):
            all_features.append(
                self.extract_features(row['id_student'], row['code_module'], row['code_presentation']))
            labels.append(1 if row['final_result'] in ['Fail', 'Withdrawn'] else 0)

        X = pd.DataFrame(all_features)
        y = pd.Series(labels, name='label')

        # Clean NaN and infinite values column by column.
        for col in X.columns:
            is_numeric = X[col].dtype in ['int64', 'float64']
            if X[col].isnull().any():
                fill_val = X[col].median() if is_numeric else X[col].mode()[0]
                # Plain assignment instead of inplace fillna on a column
                # selection (chained-assignment hazard in modern pandas).
                X[col] = X[col].fillna(fill_val)

            if is_numeric:
                # Bug fix: compute the replacement bounds over FINITE values.
                # The original used X[col].max()/min(), which are +/-inf when
                # infinities are present, so nothing was actually replaced.
                finite = X[col].replace([np.inf, -np.inf], np.nan)
                X[col] = X[col].replace([np.inf], finite.max())
                X[col] = X[col].replace([-np.inf], finite.min())

        result = (X, y)
        if use_cache:
            self._save_features(result, cache_suffix)

        print(f"特征提取完成！总特征数: {len(X.columns)}")
        return result

    def prepare_train_val_test_split(self, test_size=0.2, val_size=0.15, random_state=42, use_cache=True):
        """Stratified train/validation/test split of the prepared data.

        Args:
            test_size: fraction of ALL data held out for testing.
            val_size: fraction of ALL data used for validation.
            random_state: RNG seed for reproducible splits.
            use_cache: load/save the split from/to the pickle cache.
        """
        cache_suffix = f"train_val_test_split_{test_size}_{val_size}_{random_state}"
        if use_cache:
            cached_data = self._load_features(cache_suffix)
            if cached_data is not None:
                return cached_data

        X, y = self.prepare_training_data(use_cache=use_cache)

        # Hold out the test set first...
        X_train_val, X_test, y_train_val, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y)

        # ...then carve the validation set out of the remainder, rescaling the
        # ratio so that val_size stays a fraction of the WHOLE dataset.
        val_ratio = val_size / (1 - test_size)
        X_train, X_val, y_train, y_val = train_test_split(
            X_train_val, y_train_val, test_size=val_ratio,
            random_state=random_state, stratify=y_train_val)
        result = (X_train, X_val, X_test, y_train, y_val, y_test)
        if use_cache:
            self._save_features(result, cache_suffix)
        print(f"数据分割完成！训练集: {len(X_train)}, 验证集: {len(X_val)}, 测试集: {len(X_test)}")
        return result

    def _save_features(self, data, suffix):
        """Pickle `data` to the module-namespaced cache file."""
        filename = self._get_cache_filename(suffix)
        with open(filename, 'wb') as f:
            pickle.dump(data, f)
        # Bug fix: the message printed a literal placeholder instead of the path.
        print(f"特征已保存到: {filename}")

    def _load_features(self, suffix):
        """Load pickled features from cache; return None on cache miss.

        NOTE(review): pickle.load on an attacker-controlled cache file is
        unsafe; acceptable only for a local, trusted cache directory.
        """
        filename = self._get_cache_filename(suffix)
        if os.path.exists(filename):
            with open(filename, 'rb') as f:
                # Bug fix: the message printed a literal placeholder instead of the path.
                print(f"从缓存加载特征: {filename}")
                return pickle.load(f)
        return None