import numpy as np
import pandas as pd
from datetime import datetime

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split


def clean_data(df):
    """Clean the book-crossing DataFrame in place and return it.

    Imputation performed:
      * ``age``: the string ``'unset'`` and the value ``9`` are treated as
        missing, then filled with the column median.
      * text columns (``Summary``, ``book_title``, ``book_author``):
        missing -> empty string, coerced to ``str``.
      * categorical columns (``Category``, ``city``, ``state``, ``country``):
        missing -> the literal level ``'missing'``.
      * ``year_of_publication``: values outside ``(1800, current_year]``
        (including NaN, which fails both comparisons) -> column mode.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw book-crossing data. Mutated in place.

    Returns
    -------
    pandas.DataFrame
        The same (mutated) frame, for call chaining.
    """
    # NOTE(review): treating age == 9 as a missing-value sentinel mirrors the
    # original code — confirm 9 really is a sentinel in this dataset.
    df['age'] = df['age'].replace({'unset': np.nan, 9: np.nan})
    df['age'] = df['age'].fillna(df['age'].median())

    # Text fields: missing -> '' and force string dtype for the vectorizer.
    text_cols = ['Summary', 'book_title', 'book_author']
    for col in text_cols:
        df[col] = df[col].fillna('').astype(str)

    # Categorical fields: make missingness an explicit category.
    cat_cols = ['Category', 'city', 'state', 'country']
    df[cat_cols] = df[cat_cols].fillna('missing')

    # Publication year: compute the fallback mode ONCE (the previous
    # row-wise lambda recomputed .mode() for every row — O(n^2)), then
    # replace all out-of-range values vectorized. NaN comparisons are
    # False, so NaN is replaced by the mode, exactly as before.
    current_year = datetime.now().year
    year = df['year_of_publication']
    mode_year = year.mode()[0]
    valid = (year > 1800) & (year <= current_year)
    df['year_of_publication'] = year.where(valid, mode_year)

    return df


from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder, StandardScaler


class ItemSelector(BaseEstimator, TransformerMixin):
    """Stateless transformer that projects a DataFrame onto column(s).

    When ``key`` is a single column name, ``transform`` returns a Series;
    when it is a list of names, a DataFrame. Intended for use as the first
    step of a Pipeline branch inside a FeatureUnion.
    """

    def __init__(self, key):
        # Column name (str) or list of column names to select.
        self.key = key

    def fit(self, X, y=None):
        """No-op fit; present only to satisfy the sklearn estimator API."""
        return self

    def transform(self, X):
        """Return the column(s) named by ``self.key`` from ``X``."""
        selected = X[self.key]
        return selected


# Feature-processing pipeline: three parallel branches whose outputs are
# concatenated column-wise by a FeatureUnion.

# Branch 1: TF-IDF features from the free-text book summary.
_text_branch = Pipeline([
    ('selector', ItemSelector(key='Summary')),
    ('tfidf', TfidfVectorizer(max_features=500)),
])

# Branch 2: one-hot encoding of the categorical columns; categories unseen
# during fit are ignored at transform time rather than raising.
_category_branch = Pipeline([
    ('selector', ItemSelector(key=['Category', 'publisher'])),
    ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=True)),
])

# Branch 3: standardization (zero mean, unit variance) of numeric columns.
_numeric_branch = Pipeline([
    ('selector', ItemSelector(key=['age', 'year_of_publication'])),
    ('scaler', StandardScaler()),
])

preprocessor = FeatureUnion([
    ('text', _text_branch),
    ('category', _category_branch),
    ('numeric', _numeric_branch),
])


def load_and_preprocess_data(epoch, chunk_size=10000,
                             data_path='../ml-latest-small/book_crossing.csv'):
    """Load one chunk of the book-crossing CSV, clean, split, and preprocess.

    Parameters
    ----------
    epoch : int
        Zero-based chunk index; rows ``[epoch*chunk_size,
        (epoch+1)*chunk_size)`` are selected by position.
    chunk_size : int, optional
        Rows per chunk. Defaults to 10000, matching the previous
        hard-coded value.
    data_path : str, optional
        Path of the CSV file to read.

    Returns
    -------
    tuple
        ``(X_train_processed, X_test_processed, y_train, y_test)`` where the
        X matrices are the (sparse) output of the module-level
        ``preprocessor`` FeatureUnion.
    """
    # The first CSV column is a row index, not a feature.
    raw_data = pd.read_csv(data_path, index_col=0)

    # Feature columns to keep after cleaning; everything else is dropped.
    keep_features = [
        'age', 'book_title', 'book_author', 'year_of_publication',
        'publisher', 'Summary', 'Category', 'city', 'state', 'country'
    ]

    # NOTE(review): clean_data imputes with median/mode computed over the
    # FULL dataset before the train/test split, so imputation statistics
    # leak from test into train — acceptable for a demo, worth fixing for
    # a rigorous evaluation.
    cleaned_data = clean_data(raw_data)

    # Separate features from the rating label.
    X = cleaned_data[keep_features]
    y = cleaned_data['rating']

    # Select this epoch's chunk by POSITION. iloc makes the positional
    # intent explicit — plain [] integer slicing is ambiguous when the
    # frame carries a custom (possibly integer) index.
    start = epoch * chunk_size
    X = X.iloc[start:start + chunk_size]
    y = y.iloc[start:start + chunk_size]

    # Split first so the vectorizers/encoders/scalers are fit on training
    # rows only.
    X_train_raw, X_test_raw, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    # Fit the shared preprocessor on train, then transform both partitions.
    preprocessor.fit(X_train_raw)
    X_train_processed = preprocessor.transform(X_train_raw)
    X_test_processed = preprocessor.transform(X_test_raw)

    return X_train_processed, X_test_processed, y_train, y_test
