import pandas as pd
import numpy as np
import os
import pickle
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from SimpleDataLoader import SimplifiedFeatureExtractor

class MultiModuleFeatureExtractor:
    """Extract and combine training features across multiple course modules.

    Wraps one SimplifiedFeatureExtractor per module, concatenates their
    per-module training data into a single dataset, and caches combined
    results as pickles under ``cache_dir``.
    """

    def __init__(self, student_info, student_vle, student_assessment, student_registration, vle, assessments,
                 modules=None, cache_dir='./feature_cache'):
        """
        Args:
            student_info, student_vle, student_registration, vle, assessments:
                DataFrames with a 'code_module' column; each is filtered to the
                current module before being handed to that module's extractor.
            student_assessment: passed to every extractor as a full copy,
                unfiltered. NOTE(review): this is the only input not filtered
                by module -- confirm that is intentional.
            modules: list of module codes to process
                (defaults to ['AAA', 'CCC', 'BBB']).
            cache_dir: directory for cached feature pickles (created if absent).
        """
        # Avoid a mutable default argument; preserve the original default list.
        self.modules = ['AAA', 'CCC', 'BBB'] if modules is None else modules
        self.cache_dir = cache_dir
        os.makedirs(self.cache_dir, exist_ok=True)

        # One extractor instance per module code.
        self.extractors = {}

        print(f"正在初始化数据（加载{len(self.modules)}个模块）...")
        for module in self.modules:
            print(f"\n初始化模块 {module}...")
            self.extractors[module] = SimplifiedFeatureExtractor(
                student_info[student_info['code_module'] == module].copy(),
                student_vle[student_vle['code_module'] == module].copy(),
                student_assessment.copy(),
                student_registration[student_registration['code_module'] == module].copy(),
                vle[vle['code_module'] == module].copy(),
                assessments[assessments['code_module'] == module].copy(),
                cache_dir=cache_dir
            )

    def prepare_combined_training_data(self, use_cache=True):
        """Build (and cache) the combined (X, y) training data for all modules.

        Each module's features are reduced to numeric columns only, then all
        modules are concatenated row-wise with a fresh RangeIndex. Assumes
        every module yields the same numeric feature columns -- TODO confirm
        against SimplifiedFeatureExtractor.

        Args:
            use_cache: if True, try loading the combined result from the
                pickle cache first, and store the result afterwards.

        Returns:
            Tuple ``(combined_X, combined_y)`` of concatenated pandas objects.
        """
        cache_suffix = "combined_train_data"
        if use_cache:
            cached_data = self._load_features(cache_suffix)
            if cached_data is not None:
                return cached_data

        all_X = []
        all_y = []

        for module, extractor in self.extractors.items():
            print(f"\n正在处理模块 {module}...")
            X, y = extractor.prepare_training_data(use_cache=use_cache)
            # Keep numeric columns only so modules concatenate cleanly.
            X = X.select_dtypes(include=[np.number])
            all_X.append(X)
            all_y.append(y)

        # Stack all modules' rows; reset the index so it stays unique.
        combined_X = pd.concat(all_X, axis=0, ignore_index=True)
        combined_y = pd.concat(all_y, axis=0, ignore_index=True)
        result = (combined_X, combined_y)

        if use_cache:
            self._save_features(result, cache_suffix)

        print(f"\n合并完成！总样本数: {len(combined_X)}")
        print(f"特征维度: {combined_X.shape[1]} (与单模块一致)")
        return result

    def prepare_combined_train_val_test_split(self, test_size=0.1, val_size=0.1, random_state=42, use_cache=True):
        """Split the combined dataset into stratified train/val/test sets.

        The test set is split off first; the validation fraction is then
        rescaled (val_size / (1 - test_size)) so that it equals ``val_size``
        of the ORIGINAL dataset. Both splits stratify on the labels.

        Args:
            test_size: fraction of the full dataset used for the test set.
            val_size: fraction of the full dataset used for the validation set.
            random_state: random seed passed to both splits.
            use_cache: if True, load/store the split from/to the pickle cache
                (keyed by the three parameters above).

        Returns:
            Tuple ``(X_train, X_val, X_test, y_train, y_val, y_test)``.
        """
        cache_suffix = f"combined_train_val_test_split_{test_size}_{val_size}_{random_state}"
        if use_cache:
            cached_data = self._load_features(cache_suffix)
            if cached_data is not None:
                return cached_data

        print("\n正在准备合并数据集...")
        X, y = self.prepare_combined_training_data(use_cache=use_cache)

        # First carve out the test set (stratified on labels).
        X_train_val, X_test, y_train_val, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )

        # Then split validation off the remainder, rescaling the fraction
        # so val_size is relative to the original dataset size.
        adjusted_val_size = val_size / (1 - test_size)
        X_train, X_val, y_train, y_val = train_test_split(
            X_train_val, y_train_val, test_size=adjusted_val_size,
            random_state=random_state, stratify=y_train_val
        )

        result = (X_train, X_val, X_test, y_train, y_val, y_test)

        if use_cache:
            self._save_features(result, cache_suffix)

        print("\n合并数据分割完成！")
        print(f"训练集大小: {len(X_train)}, 验证集大小: {len(X_val)}, 测试集大小: {len(X_test)}")
        print(f"正样本比例 - 训练集: {y_train.mean():.2%}, 验证集: {y_val.mean():.2%}, 测试集: {y_test.mean():.2%}")
        return result

    ### Helpers: feature-cache persistence ###
    def _get_cache_filename(self, suffix):
        """Return the cache file path for the given suffix."""
        return os.path.join(self.cache_dir, f"multi_module_{suffix}.pkl")

    def _save_features(self, data, suffix):
        """Pickle ``data`` to the cache file identified by ``suffix``."""
        filename = self._get_cache_filename(suffix)
        with open(filename, 'wb') as f:
            pickle.dump(data, f)
        # Fix: the message previously never interpolated the actual path.
        print(f"特征已保存到: {filename}")

    def _load_features(self, suffix):
        """Unpickle and return cached data for ``suffix``, or None if absent."""
        filename = self._get_cache_filename(suffix)
        if os.path.exists(filename):
            with open(filename, 'rb') as f:
                # Fix: the message previously never interpolated the actual path.
                print(f"从缓存加载特征: {filename}")
                return pickle.load(f)
        return None

