import logging
import os
import pickle
from typing import Dict, Tuple

import numpy as np
import pandas as pd
from scipy.sparse import load_npz, save_npz

from .data_loader import DataLoader
from .data_cleaner import DataCleaner
from .feature_engineer import FeatureEngineer
from .text_processor import TextProcessor

class DataPipeline:
    """
    Data preprocessing pipeline - orchestrates all preprocessing steps.

    Wires together loading, cleaning, text-feature extraction and feature
    engineering, and persists/restores the processed artifacts in formats
    that round-trip (pickle, feather for large tables, npz for sparse).
    """

    def __init__(self, config_path: str = "config/data_config.yaml"):
        """
        Args:
            config_path: Path to the YAML data configuration consumed by
                DataLoader; the parsed config is shared with the cleaner
                and the feature engineer.
        """
        self.config_path = config_path
        self.logger = self._setup_logging()

        # Initialize pipeline components (all share DataLoader's parsed config).
        self.data_loader = DataLoader(config_path)
        self.data_cleaner = DataCleaner(self.data_loader.config)
        self.feature_engineer = FeatureEngineer(self.data_loader.config)
        self.text_processor = TextProcessor()

    def _setup_logging(self) -> logging.Logger:
        """Configure basic logging and return this module's logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        return logging.getLogger(__name__)

    def _sanitize_movie_features(self, mf: pd.DataFrame, announce: bool = False) -> pd.DataFrame:
        """
        Return movie features with a valid, unique integer ``movieId`` column.

        Recovers ``movieId`` from the index when it is not a column, drops
        rows whose id is missing or non-numeric, coerces the id to ``int``
        and de-duplicates (first occurrence wins). Centralizes logic that
        was previously duplicated in run_pipeline and _save_processed_data.

        Args:
            mf: The movie feature table to sanitize.
            announce: When True, log how many rows were removed.

        Returns:
            The sanitized DataFrame (a new object; the input is not mutated).
        """
        # If movieId lives on the index, promote it to a regular column.
        if 'movieId' not in mf.columns:
            mf = mf.reset_index().rename(columns={'index': 'movieId'})
        before = len(mf)
        # Work on a copy so the assignments below never write into a view.
        mf = mf.dropna(subset=['movieId']).copy()
        # Safe numeric conversion: non-numeric ids become NaN and are dropped.
        mf['movieId'] = pd.to_numeric(mf['movieId'], errors='coerce').astype('Int64')
        mf = mf.dropna(subset=['movieId'])
        mf['movieId'] = mf['movieId'].astype(int)
        # Deduplicate, keeping the first record per movie.
        mf = mf.drop_duplicates(subset=['movieId'])
        after = len(mf)
        if announce and after != before:
            self.logger.info(f"🧽 已清洗 movie_features：移除 {before - after} 行无效记录，并规范 movieId 类型为 int")
        return mf

    def run_pipeline(self, save_results: bool = True) -> Dict[str, pd.DataFrame]:
        """
        Run the full data preprocessing pipeline.

        Args:
            save_results: Whether to persist the processed results to disk.

        Returns:
            Dict with 'cleaned_data' and 'features', plus the fitted
            'text_processor' and 'feature_engineer' instances.
        """
        self.logger.info("🚀 开始数据预处理流水线")

        # 1. Load raw data
        self.logger.info("📥 步骤1: 加载原始数据")
        raw_data = self.data_loader.load_raw_data()

        # Validate; a failed check is logged but does not abort the run.
        validation_results = self.data_loader.validate_data(raw_data)
        self.logger.info(f"数据验证结果: {validation_results}")

        if not all(validation_results.values()):
            self.logger.warning("部分数据验证失败，但继续处理")

        # 2. Data cleaning
        self.logger.info("🧹 步骤2: 数据清洗")
        cleaned_data = self.data_cleaner.clean_all_data(raw_data)

        # 3. Text feature extraction
        self.logger.info("📝 步骤3: 文本特征处理")
        text_features = self.text_processor.create_text_features(
            cleaned_data['movies'], cleaned_data['tags']
        )

        # 4. Feature engineering
        self.logger.info("⚙️  步骤4: 特征工程")
        features = self.feature_engineer.create_all_features(cleaned_data)

        # Merge text features into movie features keyed on movieId
        # (plain positional index alignment would silently produce NaN rows).
        if text_features is not None and features.get('movie_features') is not None:
            try:
                # Attach movieId so the text features can be merged by key.
                movies_df = cleaned_data['movies']
                text_with_id = pd.concat(
                    [movies_df[['movieId']].reset_index(drop=True), text_features.reset_index(drop=True)],
                    axis=1
                )
                # Keep only movies present in movie_features (typically the rated subset).
                if 'movieId' in features['movie_features'].columns:
                    features['movie_features'] = features['movie_features'].merge(text_with_id, on='movieId', how='left')
                else:
                    # movie_features may carry movieId on its index instead.
                    mf = features['movie_features'].reset_index().rename(columns={'index': 'movieId'})
                    features['movie_features'] = mf.merge(text_with_id, on='movieId', how='left')
                self.logger.info("✅ 文本特征已按 movieId 合并到电影特征")
            except Exception as e:
                # Best effort: fall back to the unmerged movie features.
                self.logger.warning(f"⚠️ 合并文本特征失败，继续使用原电影特征: {e}")

        # 5. Basic hygiene: ensure movie_features has a clean integer movieId.
        if features.get('movie_features') is not None:
            try:
                features['movie_features'] = self._sanitize_movie_features(
                    features['movie_features'], announce=True
                )
            except Exception as e:
                self.logger.warning(f"⚠️ 清洗 movie_features 失败: {e}")

        # 6. Persist results
        if save_results:
            self._save_processed_data(cleaned_data, features)

        self.logger.info("✅ 数据预处理流水线完成")

        return {
            'cleaned_data': cleaned_data,
            'features': features,
            'text_processor': self.text_processor,
            'feature_engineer': self.feature_engineer
        }

    def _save_processed_data(self, cleaned_data: Dict, features: Dict):
        """
        Persist cleaned data and features, with format special-casing.

        Large cleaned tables (> 1M rows) go to feather, sparse matrices go
        to npz via scipy, DataFrames and dicts go to pickle. Each format
        mirrors what load_processed_data() knows how to read back.
        """
        output_dir = self.data_loader.config['data_paths']['processed']['output_dir']
        os.makedirs(output_dir, exist_ok=True)

        # Save the cleaned tables.
        for name, data in cleaned_data.items():
            if data is not None and not isinstance(data, dict):
                file_path = os.path.join(output_dir, f'{name}_cleaned.pkl')

                if len(data) > 1000000:
                    # Large tables: feather is a compact columnar format.
                    feather_path = os.path.join(output_dir, f'{name}_cleaned.feather')
                    data.reset_index().to_feather(feather_path)
                    self.logger.info(f"保存 {name} 数据到 {feather_path} (大数据格式)")
                else:
                    data.to_pickle(file_path)
                    self.logger.info(f"保存 {name} 数据到 {file_path}")

        # Save the feature artifacts.
        for name, feature_data in features.items():
            if feature_data is None:
                continue
            # Hard re-validation before saving (movieId integrity in particular).
            if name == 'movie_features':
                try:
                    feature_data = self._sanitize_movie_features(feature_data)
                    features[name] = feature_data
                except Exception as e:
                    self.logger.warning(f"⚠️ 保存前清洗 movie_features 失败: {e}")
            file_path = os.path.join(output_dir, f'{name}.pkl')

            # Sparse matrices (identified by their .format attribute) use npz.
            if hasattr(feature_data, 'format'):
                sparse_path = os.path.join(output_dir, f'{name}.npz')
                save_npz(sparse_path, feature_data)
                self.logger.info(f"保存稀疏矩阵 {name} 到 {sparse_path}")
            elif hasattr(feature_data, 'to_pickle'):
                feature_data.to_pickle(file_path)
                self.logger.info(f"保存 {name} 到 {file_path}")
            elif isinstance(feature_data, dict):
                # Mapping artifacts (e.g. id lookups) go through pickle.
                with open(file_path, 'wb') as f:
                    pickle.dump(feature_data, f)
                self.logger.info(f"保存 {name} 到 {file_path}")

    def load_processed_data(self) -> Dict:
        """
        Load previously saved processed data.

        Understands every format _save_processed_data() may have written:
        pickle, feather (large cleaned tables) and npz (sparse matrices).
        Previously only pickle was read, so feather/npz artifacts were
        silently never reloaded.

        Returns:
            Dict of loaded artifacts; empty dict on failure.
        """
        output_dir = self.data_loader.config['data_paths']['processed']['output_dir']
        processed_data = {}

        try:
            # Cleaned tables: prefer pickle, fall back to the feather copy.
            for name in ['movies', 'ratings', 'tags']:
                pkl_path = os.path.join(output_dir, f'{name}_cleaned.pkl')
                feather_path = os.path.join(output_dir, f'{name}_cleaned.feather')
                if os.path.exists(pkl_path):
                    processed_data[name] = pd.read_pickle(pkl_path)
                elif os.path.exists(feather_path):
                    df = pd.read_feather(feather_path)
                    # Undo the reset_index() applied before the feather export.
                    if 'index' in df.columns:
                        df = df.set_index('index')
                    processed_data[name] = df

            # Feature artifacts: pickle for DataFrames/dicts, npz for sparse.
            for feature_type in ['movie_features', 'user_features', 'interaction_features', 'rating_matrix']:
                pkl_path = os.path.join(output_dir, f'{feature_type}.pkl')
                npz_path = os.path.join(output_dir, f'{feature_type}.npz')
                if os.path.exists(pkl_path):
                    processed_data[feature_type] = pd.read_pickle(pkl_path)
                elif os.path.exists(npz_path):
                    processed_data[feature_type] = load_npz(npz_path)

            self.logger.info("✅ 成功加载处理后的数据")
            return processed_data

        except Exception as e:
            self.logger.error(f"❌ 加载处理数据失败: {e}")
            return {}

# Usage example: run the whole preprocessing pipeline and report shapes.
if __name__ == "__main__":
    results = DataPipeline().run_pipeline(save_results=True)

    print("🎉 预处理完成！")
    print("清洗后的数据形状:")
    # Only table-like artifacts expose a .shape to report.
    for key, value in results['cleaned_data'].items():
        if hasattr(value, 'shape'):
            print(f"  {key}: {value.shape}")