import pandas as pd
import numpy as np
from typing import Dict, Tuple
import logging
from datetime import datetime

class DataCleaner:
    """
    Data cleaner for MovieLens-style tables: handles missing values,
    out-of-range values, duplicates, and cross-table consistency.
    """

    def __init__(self, config: dict):
        """
        Args:
            config: configuration dict; filter_sparse_data reads
                config['processing']['min_ratings_per_user'] and
                config['processing']['min_ratings_per_movie'].
        """
        self.config = config
        self.logger = logging.getLogger(__name__)

    def clean_movies_data(self, movies_df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean the movies table.

        Drops rows missing title/genres, extracts the release year from the
        title (MovieLens convention: "Title (YYYY)"), normalizes empty genre
        strings to '(no genres listed)', and adds a 'genres_list' column.

        Args:
            movies_df: raw movies data with 'title' and 'genres' columns.

        Returns:
            Cleaned copy of the DataFrame, or None if movies_df is None.
        """
        if movies_df is None:
            return None

        df = movies_df.copy()

        # Rows without a title or genres string are unusable downstream.
        initial_count = len(df)
        df = df.dropna(subset=['title', 'genres'])

        # expand=False yields a Series (not a one-column DataFrame), which is
        # the shape single-column assignment expects. Anchoring the pattern to
        # the end of the title avoids matching a parenthesized 4-digit number
        # that appears earlier, e.g. "Best of 1990 (1990) (1998)". Titles
        # without a trailing year become NaN, hence float dtype.
        df['year'] = (
            df['title'].str.extract(r'\((\d{4})\)\s*$', expand=False).astype(float)
        )

        # MovieLens uses the literal '(no genres listed)' placeholder;
        # normalize empty strings to match it.
        df['genres'] = df['genres'].replace('', '(no genres listed)')

        # Pipe-separated genre string -> list of genre names.
        df['genres_list'] = df['genres'].str.split('|')

        self.logger.info(f"电影数据清洗: {initial_count} -> {len(df)} 条记录")
        return df

    def clean_ratings_data(self, ratings_df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean the ratings table.

        Keeps each user's most recent rating per movie, restricts ratings to
        the valid 0.5-5.0 scale, converts the UNIX timestamp to datetime,
        derives rating_year / rating_month, then drops rows whose year falls
        outside a plausible window.

        Args:
            ratings_df: raw ratings with 'userId', 'movieId', 'rating' and
                'timestamp' (UNIX seconds) columns.

        Returns:
            Cleaned copy of the DataFrame, or None if ratings_df is None.
        """
        if ratings_df is None:
            return None

        df = ratings_df.copy()
        initial_count = len(df)

        # Sort by timestamp before deduplicating so keep='last' keeps the
        # user's *latest* rating, not merely the last row in file order.
        # A stable sort preserves the original tie-breaking for equal
        # timestamps.
        df = df.sort_values('timestamp', kind='stable')
        df = df.drop_duplicates(subset=['userId', 'movieId'], keep='last')

        # MovieLens ratings are 0.5-5.0 in half-star increments.
        df = df[df['rating'].between(0.5, 5.0)]

        # UNIX seconds -> datetime, plus convenience year/month columns.
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
        df['rating_year'] = df['timestamp'].dt.year
        df['rating_month'] = df['timestamp'].dt.month

        # Drop rows with implausible timestamps. Bounds are configurable via
        # config['processing'] and default to the original 1990-2025 window.
        proc = self.config.get('processing') or {}
        min_year = proc.get('min_rating_year', 1990)
        max_year = proc.get('max_rating_year', 2025)
        df = df[df['rating_year'].between(min_year, max_year)]

        self.logger.info(f"评分数据清洗: {initial_count} -> {len(df)} 条记录")
        return df

    def clean_tags_data(self, tags_df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean the tags table.

        Drops rows with a missing tag, removes exact duplicate
        (user, movie, tag) triples, converts the UNIX timestamp, and
        normalizes tag text (trimmed, lower-cased).

        Args:
            tags_df: raw tags with 'userId', 'movieId', 'tag', 'timestamp'.

        Returns:
            Cleaned copy of the DataFrame, or None if tags_df is None.
        """
        if tags_df is None:
            return None

        df = tags_df.copy()
        initial_count = len(df)

        # A tag row without tag text carries no information.
        df = df.dropna(subset=['tag'])

        # NOTE: deduplication runs on the *raw* tag text, so rows differing
        # only in case/whitespace (e.g. '  Funny ' vs 'funny') both survive
        # and become identical after the normalization below. Kept as-is to
        # preserve existing behavior.
        df = df.drop_duplicates(subset=['userId', 'movieId', 'tag'])

        # UNIX seconds -> datetime.
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')

        # Normalize tag text for downstream grouping/counting.
        df['tag'] = df['tag'].str.strip().str.lower()

        self.logger.info(f"标签数据清洗: {initial_count} -> {len(df)} 条记录")
        return df

    def filter_sparse_data(self, ratings_df: pd.DataFrame, movies_df: pd.DataFrame) -> Tuple[pd.DataFrame, dict]:
        """
        Filter sparse data - remove users and movies with too few ratings.

        Args:
            ratings_df: ratings data.
            movies_df: movies data, used to keep only ratings whose movie
                still exists in the movies table; may be None to skip.

        Returns:
            Tuple of (filtered ratings DataFrame, filter statistics dict).
            Returns (None, {}) if ratings_df is None.
        """
        if ratings_df is None:
            return None, {}

        df = ratings_df.copy()
        initial_stats = {
            'total_users': df['userId'].nunique(),
            'total_movies': df['movieId'].nunique(),
            'total_ratings': len(df),
        }

        thresholds = self.config['processing']

        # Keep only users with enough ratings.
        user_counts = df['userId'].value_counts()
        active_users = user_counts[
            user_counts >= thresholds['min_ratings_per_user']
        ].index
        df = df[df['userId'].isin(active_users)]

        # Keep only movies with enough ratings (counted *after* the user
        # filter). NOTE: this is a single pass — removing movies can push
        # some users back below their threshold; run iteratively if strict
        # guarantees are needed.
        movie_counts = df['movieId'].value_counts()
        popular_movies = movie_counts[
            movie_counts >= thresholds['min_ratings_per_movie']
        ].index
        df = df[df['movieId'].isin(popular_movies)]

        # Cross-table consistency: only keep ratings for movies that are
        # still present in the movies table.
        if movies_df is not None:
            available_movies = set(movies_df['movieId'].unique())
            df = df[df['movieId'].isin(available_movies)]

        def _removal_rate(remaining: int, total: int) -> float:
            # Guard against division by zero when the input was empty.
            return 1 - (remaining / total) if total else 0.0

        final_stats = {
            'remaining_users': df['userId'].nunique(),
            'remaining_movies': df['movieId'].nunique(),
            'remaining_ratings': len(df),
            'user_removal_rate': _removal_rate(df['userId'].nunique(), initial_stats['total_users']),
            'movie_removal_rate': _removal_rate(df['movieId'].nunique(), initial_stats['total_movies']),
            'rating_removal_rate': _removal_rate(len(df), initial_stats['total_ratings']),
        }

        self.logger.info(f"数据稀疏性过滤完成:")
        self.logger.info(f"  用户: {initial_stats['total_users']} → {final_stats['remaining_users']}")
        self.logger.info(f"  电影: {initial_stats['total_movies']} → {final_stats['remaining_movies']}")
        self.logger.info(f"  评分: {initial_stats['total_ratings']} → {final_stats['remaining_ratings']}")

        return df, final_stats

    def clean_all_data(self, data: Dict[str, pd.DataFrame]) -> Dict[str, pd.DataFrame]:
        """
        Clean all datasets.

        Args:
            data: raw data dict with optional 'movies', 'ratings', 'tags'
                and 'links' entries.

        Returns:
            Dict of cleaned DataFrames; also adds a 'filter_stats' entry
            (a plain dict, despite the DataFrame-valued annotation) when
            ratings were present and filtered.
        """
        cleaned_data = {}

        cleaned_data['movies'] = self.clean_movies_data(data.get('movies'))
        cleaned_data['ratings'] = self.clean_ratings_data(data.get('ratings'))
        cleaned_data['tags'] = self.clean_tags_data(data.get('tags'))
        # Links data normally needs no special cleaning; pass through as-is.
        cleaned_data['links'] = data.get('links')

        # Sparse-data filtering only makes sense when ratings survived.
        if cleaned_data['ratings'] is not None:
            filtered_ratings, filter_stats = self.filter_sparse_data(
                cleaned_data['ratings'], cleaned_data['movies']
            )
            cleaned_data['ratings'] = filtered_ratings
            cleaned_data['filter_stats'] = filter_stats

        return cleaned_data