
"""
MovieLens-100K Preprocessing Pipeline
Implements data loading, Z-score normalization, and cold-start split as per paper Section 3.1
"""

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

def preprocess_ml100k(data_path, cold_start_threshold=5, random_state=None):
    """
    Load MovieLens-100K ratings, Z-score normalize per user, and split users 8:1:1.

    Args:
        data_path: Path to the tab-separated u.data file
            (columns: user_id, item_id, rating, timestamp; no header row).
        cold_start_threshold: Users/items with <= this many interactions are
            flagged as cold-start (default 5, per paper Section 3.1).
        random_state: Seed for the train/val/test split. None keeps the split
            non-deterministic (previous behavior); pass an int to make
            experiments reproducible.

    Returns:
        dict with keys:
            'train', 'val', 'test': row (user) slices of the normalized
                user x item matrix, ~80/10/10 of users.
            'cold_users', 'cold_items': pandas Index of flagged ids.
    """
    # Load raw data.
    columns = ['user_id', 'item_id', 'rating', 'timestamp']
    raw_data = pd.read_csv(data_path, sep='\t', names=columns)

    # Flag cold-start users/items by raw interaction count.
    user_counts = raw_data['user_id'].value_counts()
    item_counts = raw_data['item_id'].value_counts()
    cold_users = user_counts[user_counts <= cold_start_threshold].index
    cold_items = item_counts[item_counts <= cold_start_threshold].index

    # Dense user x item rating matrix; unrated cells filled with 0.
    interaction_matrix = pd.pivot_table(
        raw_data,
        values='rating',
        index='user_id',
        columns='item_id',
        fill_value=0,
    )

    # Z-score normalization per user row.
    # NOTE(review): statistics include the 0 fill for unrated items, i.e.
    # mean/std run over ALL items, not just rated ones -- confirm this matches
    # paper Section 3.1 before changing.
    user_mean = interaction_matrix.mean(axis=1)
    user_std = interaction_matrix.std(axis=1)
    # Guard against division by zero for constant rows (e.g. a user who gave
    # every item the identical rating): substitute std=1 so the row normalizes
    # to all zeros instead of NaN/inf.
    user_std = user_std.mask(user_std == 0, 1.0)
    normalized_matrix = interaction_matrix.sub(user_mean, axis=0).div(user_std, axis=0)

    # Split users 8:1:1 via a (optionally seeded) permutation so the split is
    # reproducible when random_state is given.
    rng = np.random.default_rng(random_state)
    order = rng.permutation(len(normalized_matrix))
    n_train = int(len(order) * 0.8)
    n_val = int(len(order) * 0.1)
    train = normalized_matrix.iloc[order[:n_train]]
    val = normalized_matrix.iloc[order[n_train:n_train + n_val]]
    test = normalized_matrix.iloc[order[n_train + n_val:]]

    return {
        'train': train,
        'val': val,
        'test': test,
        'cold_users': cold_users,
        'cold_items': cold_items,
    }
