# File: fedentgate/utils/data_loader.py
"""Dataset Partitioning and Preprocessing Utilities"""

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

class DataPartitioner:
    """Partition and preprocess recommendation datasets.

    Supports two dataset layouts ('movielens' and 'taobao') and provides
    temporal and stratified (by user/session id) train/val/test splits,
    plus a simple label-entropy diagnostic.
    """

    def __init__(self, dataset_name, seed=42):
        """
        Initialize dataset partitioner.
        Args:
            dataset_name: Name of dataset ('movielens' or 'taobao')
            seed: Random seed for reproducibility
        """
        self.dataset_name = dataset_name
        self.seed = seed
        # NOTE: this seeds NumPy's *global* legacy RNG, so it affects any
        # other code in the process that uses np.random.*.
        np.random.seed(seed)

    def load_movielens(self, data_path):
        """
        Load and preprocess the MovieLens-20M ratings file.
        Args:
            data_path: Path to MovieLens CSV file
        Returns:
            DataFrame with columns ['userId', 'movieId', 'rating', 'timestamp'],
            where 'timestamp' is converted from Unix seconds to datetime64.
        """
        df = pd.read_csv(data_path)
        # MovieLens stores timestamps as Unix epoch seconds.
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
        return df

    def load_taobao(self, data_path):
        """
        Load and preprocess Taobao user behavior logs.
        Args:
            data_path: Path to Taobao CSV file
        Returns:
            DataFrame with columns ['user_id', 'session_id', 'behavior',
            'item_id', 'timestamp', 'location', 'device'], with 'timestamp'
            parsed to datetime64 (format inferred by pandas).
        """
        df = pd.read_csv(data_path)
        df['timestamp'] = pd.to_datetime(df['timestamp'])
        return df

    def temporal_split(self, df, train_start, train_end, test_start, test_end):
        """
        Split dataset on temporal boundaries (both endpoints inclusive).
        Args:
            df: Input DataFrame with a datetime 'timestamp' column
            train_start: Start of training period
            train_end: End of training period
            test_start: Start of test period
            test_end: End of test period
        Returns:
            (train_df, test_df): rows falling inside each window. Rows outside
            both windows are dropped; overlapping windows duplicate rows.
        """
        train_mask = (df['timestamp'] >= train_start) & (df['timestamp'] <= train_end)
        test_mask = (df['timestamp'] >= test_start) & (df['timestamp'] <= test_end)
        return df[train_mask], df[test_mask]

    def stratified_split(self, df, stratify_col, ratios=(0.7, 0.1, 0.2)):
        """
        Split by unique values of a column so that all rows sharing an id
        (e.g. one user's ratings) land in exactly one partition.
        Args:
            df: Input DataFrame
            stratify_col: Column whose unique values are partitioned
            ratios: (train_ratio, val_ratio, test_ratio); should sum to 1
        Returns:
            (train_df, val_df, test_df)
        """
        unique_ids = df[stratify_col].unique()
        # First carve off the test ids, then split the remainder into
        # train/val. The second split's fraction is rescaled because it
        # operates on only (train + val) of the ids.
        train_ids, test_ids = train_test_split(
            unique_ids, test_size=ratios[2], random_state=self.seed
        )
        val_fraction = ratios[1] / (ratios[0] + ratios[1])
        train_ids, val_ids = train_test_split(
            train_ids, test_size=val_fraction, random_state=self.seed
        )

        train_df = df[df[stratify_col].isin(train_ids)]
        val_df = df[df[stratify_col].isin(val_ids)]
        test_df = df[df[stratify_col].isin(test_ids)]

        return train_df, val_df, test_df

    def preprocess(self, data_path):
        """
        Main preprocessing pipeline: load the configured dataset and split it.
        Args:
            data_path: Path to dataset file
        Returns:
            Dict with keys 'train', 'val', 'test' mapping to DataFrames.
        Raises:
            ValueError: If self.dataset_name is not a supported dataset.
        """
        if self.dataset_name == 'movielens':
            df = self.load_movielens(data_path)
            # Temporal split: 2010-2018 train, 2019-2020 test.
            # NOTE(review): test_df from the temporal split is immediately
            # overwritten by the stratified split below, so the 2019-2020
            # window never reaches the output — confirm this is intended.
            train_df, test_df = self.temporal_split(
                df,
                train_start='2010-01-01',
                train_end='2018-12-31',
                test_start='2019-01-01',
                test_end='2020-12-31'
            )
            # Stratified split of the training window by user ID.
            train_df, val_df, test_df = self.stratified_split(
                train_df,
                stratify_col='userId',
                ratios=(0.7, 0.1, 0.2)
            )
            return {
                'train': train_df,
                'val': val_df,
                'test': test_df
            }

        elif self.dataset_name == 'taobao':
            df = self.load_taobao(data_path)
            # Stratified split by session ID keeps whole sessions together.
            train_df, val_df, test_df = self.stratified_split(
                df,
                stratify_col='session_id',
                ratios=(0.6, 0.1, 0.3)
            )
            return {
                'train': train_df,
                'val': val_df,
                'test': test_df
            }

        else:
            raise ValueError(f"Unsupported dataset: {self.dataset_name}")

    def compute_data_entropy(self, df, class_col='rating'):
        """
        Compute the Shannon entropy (natural log) of a label column.
        Args:
            df: Input DataFrame
            class_col: Column containing class labels
        Returns:
            Scalar entropy in nats; 0.0 for a single-class column.
        """
        class_counts = df[class_col].value_counts()
        probs = class_counts / class_counts.sum()
        # The 1e-9 epsilon guards log(0); value_counts never yields zero
        # counts, so it only perturbs the result negligibly.
        return -np.sum(probs * np.log(probs + 1e-9))
