import logging
from collections import Counter
from typing import Dict, Tuple

import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, save_npz
from sklearn.preprocessing import MinMaxScaler

class FeatureEngineer:
    """Feature-engineering pipeline optimised for large rating datasets.

    Builds a sparse user x movie rating matrix plus per-user, per-movie and
    per-interaction feature tables.  ``create_all_features`` is the public
    entry point; on ``MemoryError`` it falls back to a chunked strategy and,
    as a last resort, to a minimal feature set.

    NOTE(review): all timestamp arithmetic uses ``.dt.days`` and therefore
    assumes the ``timestamp`` column is datetime64 (presumably converted
    during upstream cleaning) — confirm against the data-cleaning step.
    """

    def __init__(self, config: dict):
        """Store configuration and set up logging.

        Optional config keys (defaults preserve the previous hard-coded
        behaviour):
            ``max_genres``: max number of genre one-hot columns (default 20).
            ``chunk_size``: rows per chunk in the fallback path (default 1,000,000).
        """
        self.config = config
        self.logger = logging.getLogger(__name__)
        self.feature_scalers = {}
        self.max_genres = int(config.get('max_genres', 20))
        self.chunk_size = int(config.get('chunk_size', 1_000_000))

    def _select_common_genres(self, genres_series: pd.Series) -> list:
        """Return the most frequent genres, at most ``self.max_genres``.

        Fix: the original sliced an *unordered* set (``list(all_genres)[:20]``),
        which picked 20 arbitrary, nondeterministic genres.  Counting
        occurrences makes the selection deterministic and actually "common".
        Non-list entries (e.g. NaN) are ignored.
        """
        counts = Counter()
        for genres in genres_series:
            if isinstance(genres, list):
                counts.update(genres)
        return [genre for genre, _ in counts.most_common(self.max_genres)]

    def _add_genre_columns(self, movie_features: pd.DataFrame) -> pd.DataFrame:
        """Add one-hot ``genre_*`` columns for the most common genres.

        Fix: the original membership test raised ``TypeError`` when
        ``genres_list`` held a non-list value (e.g. NaN); such rows now get 0.
        """
        for genre in self._select_common_genres(movie_features['genres_list']):
            movie_features[f'genre_{genre}'] = movie_features['genres_list'].apply(
                # bind the loop variable as a default to avoid late binding
                lambda x, g=genre: 1 if isinstance(x, list) and g in x else 0
            )
        return movie_features

    def create_rating_matrix(self, ratings_df: pd.DataFrame) -> csr_matrix:
        """Build the sparse user x movie rating matrix (CSR).

        Side effect: stores ``self.user_id_map`` / ``self.movie_id_map``
        translating raw ids to matrix row/column indices.  Returns ``None``
        when ``ratings_df`` is ``None``.

        NOTE: the CSR constructor *sums* duplicate (user, movie) pairs;
        upstream cleaning is assumed to have deduplicated ratings.
        """
        if ratings_df is None:
            return None

        self.logger.info("创建稀疏评分矩阵...")

        # Dense, sorted index spaces for users and movies.
        unique_users = sorted(ratings_df['userId'].unique())
        unique_movies = sorted(ratings_df['movieId'].unique())

        user_id_map = {user_id: idx for idx, user_id in enumerate(unique_users)}
        movie_id_map = {movie_id: idx for idx, movie_id in enumerate(unique_movies)}

        # Vectorised id -> index translation.
        row_indices = ratings_df['userId'].map(user_id_map)
        col_indices = ratings_df['movieId'].map(movie_id_map)
        ratings = ratings_df['rating'].values

        rating_matrix = csr_matrix(
            (ratings, (row_indices, col_indices)),
            shape=(len(unique_users), len(unique_movies))
        )

        # Keep the mappings for later lookup by create_all_features().
        self.user_id_map = user_id_map
        self.movie_id_map = movie_id_map

        sparsity = 1 - rating_matrix.nnz / (rating_matrix.shape[0] * rating_matrix.shape[1])
        self.logger.info(f"创建稀疏评分矩阵: {rating_matrix.shape} (稀疏度: {sparsity:.6f})")
        return rating_matrix

    def create_movie_features(self, movies_df: pd.DataFrame, ratings_df: pd.DataFrame) -> pd.DataFrame:
        """Per-movie features: genre one-hots plus rating count/mean.

        Movies without any rating are dropped when ``ratings_df`` is given.
        Returns ``None`` for ``None`` movies input.
        """
        if movies_df is None:
            return None

        self.logger.info("创建电影特征（优化版本）...")
        movie_features = movies_df.copy()

        # Restrict to rated movies — drastically reduces the row count.
        if ratings_df is not None:
            rated_movies = ratings_df['movieId'].unique()
            movie_features = movie_features[movie_features['movieId'].isin(rated_movies)].copy()

        movie_features = self._add_genre_columns(movie_features)

        if ratings_df is not None:
            # Aggregate once instead of merging the full ratings table.
            movie_stats = ratings_df.groupby('movieId').agg({
                'rating': ['count', 'mean']
            }).round(3)
            movie_stats.columns = ['rating_count', 'rating_mean']

            movie_features = movie_features.merge(
                movie_stats, on='movieId', how='left'
            )

            # Unrated movies (possible when filtering was skipped) get zeros.
            movie_features['rating_count'] = movie_features['rating_count'].fillna(0)
            movie_features['rating_mean'] = movie_features['rating_mean'].fillna(0)

        self.logger.info(f"创建电影特征: {movie_features.shape}")
        return movie_features

    def create_user_features(self, ratings_df: pd.DataFrame, movies_df: pd.DataFrame = None) -> pd.DataFrame:
        """Per-user aggregate features (count/mean/std, activity rate).

        ``movies_df`` is accepted for interface compatibility but unused.
        Returns ``None`` for ``None`` input.
        """
        if ratings_df is None:
            return None

        self.logger.info("创建用户特征（优化版本）...")

        user_stats = ratings_df.groupby('userId').agg({
            'rating': ['count', 'mean', 'std'],
            'timestamp': 'min'
        }).round(3)

        user_stats.columns = ['rating_count', 'rating_mean', 'rating_std', 'first_rating']
        # Fix: std is NaN for single-rating users — treat as zero variance.
        user_stats['rating_std'] = user_stats['rating_std'].fillna(0)

        # Active days measured against the newest rating in the dataset.
        max_date = ratings_df['timestamp'].max()
        user_stats['activity_days'] = (
            (max_date - user_stats['first_rating']).dt.days + 1
        )
        user_stats['ratings_per_day'] = (
            user_stats['rating_count'] / user_stats['activity_days']
        ).round(3)

        # first_rating was only needed to derive activity_days.
        user_features = user_stats.drop('first_rating', axis=1)

        self.logger.info(f"创建用户特征: {user_features.shape}")
        return user_features

    def create_interaction_features(self, ratings_df: pd.DataFrame) -> pd.DataFrame:
        """Per-rating interaction features with a recency weight.

        ``time_weight`` decays from 1.0 (newest rating) as
        ``1 / (1 + days_since / 30)``.  Returns ``None`` for ``None`` input.
        """
        if ratings_df is None:
            return None

        self.logger.info("创建交互特征（优化版本）...")

        # Keep only the columns we need to bound memory use.
        interaction_features = ratings_df[['userId', 'movieId', 'rating', 'timestamp']].copy()

        max_timestamp = interaction_features['timestamp'].max()
        interaction_features['days_since'] = (
            max_timestamp - interaction_features['timestamp']
        ).dt.days

        interaction_features['time_weight'] = 1 / (1 + interaction_features['days_since'] / 30)

        self.logger.info(f"创建交互特征: {interaction_features.shape}")
        return interaction_features

    def create_all_features(self, cleaned_data: Dict[str, pd.DataFrame]) -> Dict:
        """Create every feature set from cleaned data (memory-friendly).

        Returns a dict with 'rating_matrix', 'movie_features',
        'user_features', 'interaction_features' and the two id maps.
        Falls back to chunked processing on ``MemoryError``.
        """
        features = {}

        try:
            # Sparse rating matrix first (also populates the id maps).
            features['rating_matrix'] = self.create_rating_matrix(cleaned_data['ratings'])

            features['movie_features'] = self.create_movie_features(
                cleaned_data['movies'], cleaned_data['ratings']
            )
            features['user_features'] = self.create_user_features(cleaned_data['ratings'])
            features['interaction_features'] = self.create_interaction_features(cleaned_data['ratings'])

            # Empty dicts if the matrix step was skipped (ratings was None).
            features['user_id_map'] = getattr(self, 'user_id_map', {})
            features['movie_id_map'] = getattr(self, 'movie_id_map', {})

        except MemoryError as e:
            self.logger.error(f"内存不足: {e}")
            features = self._create_features_chunked(cleaned_data)

        return features

    def _create_features_chunked(self, cleaned_data: Dict) -> Dict:
        """Chunked fallback for datasets too large for the direct path.

        Fixes relative to the original:
        * The rating matrix is assembled from vectorised per-chunk CSR pieces
          instead of a per-row ``lil_matrix`` fill via ``iterrows`` (millions
          of Python-level calls).  Duplicate pairs are summed, matching
          ``create_rating_matrix``.
        * ``activity_days`` is computed against the *global* max timestamp;
          the original used each batch's local max, producing features that
          were inconsistent across batches.
        """
        self.logger.info("使用分块处理策略处理超大评分数据...")

        ratings_df = cleaned_data['ratings']
        movies_df = cleaned_data['movies']
        chunk_size = self.chunk_size

        features = {}

        try:
            # 1. Build id -> index maps from the full id sets.
            self.logger.info("构建用户和电影ID映射...")
            all_user_ids = ratings_df['userId'].unique()
            all_movie_ids = ratings_df['movieId'].unique()

            user_id_map = {user_id: idx for idx, user_id in enumerate(sorted(all_user_ids))}
            movie_id_map = {movie_id: idx for idx, movie_id in enumerate(sorted(all_movie_ids))}

            # 2. Assemble the sparse matrix chunk by chunk.
            self.logger.info(f"分块构建稀疏矩阵，块大小: {chunk_size}")
            n_chunks = (len(ratings_df) + chunk_size - 1) // chunk_size
            shape = (len(all_user_ids), len(all_movie_ids))
            rating_matrix_csr = csr_matrix(shape, dtype=np.float32)

            for i in range(n_chunks):
                start_idx = i * chunk_size
                end_idx = min((i + 1) * chunk_size, len(ratings_df))
                chunk = ratings_df.iloc[start_idx:end_idx]

                # Vectorised translation + per-chunk CSR; summation merges chunks.
                rows = chunk['userId'].map(user_id_map).to_numpy()
                cols = chunk['movieId'].map(movie_id_map).to_numpy()
                vals = chunk['rating'].to_numpy(dtype=np.float32)
                rating_matrix_csr = rating_matrix_csr + csr_matrix(
                    (vals, (rows, cols)), shape=shape
                )

                self.logger.info(f"处理块 {i+1}/{n_chunks}: {start_idx}-{end_idx}")

            features['rating_matrix'] = rating_matrix_csr
            features['user_id_map'] = user_id_map
            features['movie_id_map'] = movie_id_map

            self.logger.info(f"稀疏矩阵构建完成: {rating_matrix_csr.shape}, 非零元素: {rating_matrix_csr.nnz}")

            # 3. User features in batches of 10k users.
            self.logger.info("分块计算用户特征...")
            global_max_ts = ratings_df['timestamp'].max()
            user_features_list = []

            for i in range(0, len(all_user_ids), 10000):
                user_batch = all_user_ids[i:i + 10000]
                user_ratings = ratings_df[ratings_df['userId'].isin(user_batch)]

                if not user_ratings.empty:
                    user_stats = user_ratings.groupby('userId').agg({
                        'rating': ['count', 'mean', 'std'],
                        'timestamp': 'min'
                    }).round(3)

                    user_stats.columns = ['rating_count', 'rating_mean', 'rating_std', 'first_rating']
                    user_stats['rating_std'] = user_stats['rating_std'].fillna(0)
                    # Fix: measure against the global max, not the batch max.
                    user_stats['activity_days'] = (
                        (global_max_ts - user_stats['first_rating']).dt.days + 1
                    )
                    user_stats['ratings_per_day'] = (
                        user_stats['rating_count'] / user_stats['activity_days']
                    ).round(3)

                    user_features_list.append(user_stats.drop('first_rating', axis=1))

                self.logger.info(f"用户特征处理进度: {min(i+10000, len(all_user_ids))}/{len(all_user_ids)}")

            features['user_features'] = pd.concat(user_features_list) if user_features_list else pd.DataFrame()

            # 4. Movie features: rated movies only, one-hot common genres.
            self.logger.info("分块计算电影特征...")
            rated_movies = ratings_df['movieId'].unique()
            movie_features = movies_df[movies_df['movieId'].isin(rated_movies)].copy()
            movie_features = self._add_genre_columns(movie_features)

            # Per-movie rating stats, 5k movies per batch.
            movie_stats_list = []
            for i in range(0, len(rated_movies), 5000):
                movie_batch = rated_movies[i:i + 5000]
                movie_ratings = ratings_df[ratings_df['movieId'].isin(movie_batch)]

                if not movie_ratings.empty:
                    batch_stats = movie_ratings.groupby('movieId').agg({
                        'rating': ['count', 'mean']
                    }).round(3)
                    batch_stats.columns = ['rating_count', 'rating_mean']
                    movie_stats_list.append(batch_stats)

                self.logger.info(f"电影特征处理进度: {min(i+5000, len(rated_movies))}/{len(rated_movies)}")

            if movie_stats_list:
                movie_stats = pd.concat(movie_stats_list)
                movie_features = movie_features.merge(movie_stats, on='movieId', how='left')
                # Fix: only fill when the columns actually exist (no stats ->
                # the original raised KeyError here).
                movie_features['rating_count'] = movie_features['rating_count'].fillna(0)
                movie_features['rating_mean'] = movie_features['rating_mean'].fillna(0)
            features['movie_features'] = movie_features

            # 5. Sampled interaction features (analysis only, fixed seed).
            self.logger.info("创建简化版交互特征...")
            sample_size = min(100000, len(ratings_df))
            interaction_sample = ratings_df.sample(n=sample_size, random_state=42)[
                ['userId', 'movieId', 'rating', 'timestamp']
            ].copy()

            max_timestamp = interaction_sample['timestamp'].max()
            interaction_sample['days_since'] = (max_timestamp - interaction_sample['timestamp']).dt.days
            interaction_sample['time_weight'] = 1 / (1 + interaction_sample['days_since'] / 30)

            features['interaction_features'] = interaction_sample
            features['is_sampled'] = True  # marks sampled (not full) interactions

            self.logger.info("分块处理完成！")

        except Exception as e:
            self.logger.error(f"分块处理过程中出错: {e}")
            # Last-chance fallback if even chunking fails.
            features = self._create_minimal_features(cleaned_data)

        return features

    def _create_minimal_features(self, cleaned_data: Dict) -> Dict:
        """Last-resort minimal feature set when even chunking fails.

        Produces only per-user and per-movie count/mean stats, summary info
        about the rating matrix (never the matrix itself), and a small
        fixed-seed sample of raw interactions.  On failure returns
        ``{'error': message}``.
        """
        self.logger.info("使用极简特征创建策略...")

        ratings_df = cleaned_data['ratings']
        movies_df = cleaned_data['movies']

        features = {}

        try:
            # 1. Basic per-user stats.
            user_features = ratings_df.groupby('userId').agg({
                'rating': ['count', 'mean']
            }).round(3)
            user_features.columns = ['rating_count', 'rating_mean']
            features['user_features'] = user_features

            # 2. Basic per-movie stats (inner join keeps rated movies only).
            movie_features = movies_df[['movieId', 'title', 'genres']].copy()
            movie_stats = ratings_df.groupby('movieId').agg({
                'rating': ['count', 'mean']
            }).round(3)
            movie_stats.columns = ['rating_count', 'rating_mean']

            features['movie_features'] = movie_features.merge(movie_stats, on='movieId', how='inner')

            # 3. Matrix summary statistics instead of the full matrix.
            n_users = ratings_df['userId'].nunique()
            n_movies = ratings_df['movieId'].nunique()
            features['rating_matrix_info'] = {
                'n_users': n_users,
                'n_movies': n_movies,
                'n_ratings': len(ratings_df),
                'sparsity': 1 - len(ratings_df) / (n_users * n_movies)
            }

            # 4. Interaction sample.
            sample_size = min(50000, len(ratings_df))
            features['interaction_sample'] = ratings_df.sample(n=sample_size, random_state=42)

            self.logger.info("极简特征创建完成")

        except Exception as e:
            self.logger.error(f"极简特征创建失败: {e}")
            features = {'error': str(e)}

        return features