from datetime import datetime

import joblib
import numpy as np
from surprise import AlgoBase

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.ensemble import GradientBoostingRegressor

from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
from scipy.sparse import hstack


# Column selector transformer for use inside sklearn Pipelines.
class ItemSelector(BaseEstimator, TransformerMixin):
    """Select one or several columns from a DataFrame.

    ``key`` may be a single column name (transform returns a Series,
    suitable for TfidfVectorizer) or a list of names (transform returns
    a DataFrame, suitable for OneHotEncoder/StandardScaler).
    """

    def __init__(self, key):
        self.key = key  # column name (str) or list of column names to select

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Plain column selection; returns a Series for a str key,
        # a DataFrame for a list key.
        return X[self.key]


# Feature-preprocessing FeatureUnion: three parallel branches whose sparse
# outputs are concatenated column-wise.

# Text branch: TF-IDF over the 'Summary' column, capped at 500 terms.
_text_branch = Pipeline([
    ('selector', ItemSelector(key='Summary')),
    ('tfidf', TfidfVectorizer(max_features=500)),
])

# Categorical branch: one-hot encode, ignoring categories unseen at fit time.
_categorical_branch = Pipeline([
    ('selector', ItemSelector(key=['Category', 'publisher'])),
    ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=True)),
])

# Numeric branch: standardize age and publication year.
_numeric_branch = Pipeline([
    ('selector', ItemSelector(key=['age', 'year_of_publication'])),
    ('scaler', StandardScaler()),
])

preprocessor = FeatureUnion([
    ('text', _text_branch),
    ('category', _categorical_branch),
    ('numeric', _numeric_branch),
])


def clean_data(df):
    """Clean the book-crossing DataFrame in place and return it.

    - Replaces sentinel ages ('unset' and 9) with NaN, then imputes the
      column median.
    - Fills missing text fields with '' and coerces them to str so the
      TF-IDF vectorizer never sees NaN.
    - Fills missing categorical fields with an explicit 'missing' category.
    - Replaces publication years outside (1800, current year] — including
      NaN, which fails the range check — with the modal year.

    Note: mutates *df* and returns it for chaining.
    """
    # Age: treat sentinel values as missing, impute with the median.
    df['age'] = df['age'].replace({'unset': np.nan, 9: np.nan})
    df['age'] = df['age'].fillna(df['age'].median())

    # Text fields: missing -> empty string, everything coerced to str.
    text_cols = ['Summary', 'book_title', 'book_author']
    for col in text_cols:
        df[col] = df[col].fillna('').astype(str)

    # Categorical fields: missing -> explicit 'missing' category.
    cat_cols = ['Category', 'city', 'state', 'country']
    df[cat_cols] = df[cat_cols].fillna('missing')

    # Publication year: vectorized range check. The modal year is computed
    # ONCE here — the original recomputed mode() inside a per-row lambda,
    # which made this step accidentally quadratic.
    current_year = datetime.now().year
    year = df['year_of_publication']
    fallback_year = year.mode()[0]
    valid = (year > 1800) & (year <= current_year)
    df['year_of_publication'] = year.where(valid, fallback_year)

    return df


# Turn GBDT leaf-node indices into one-hot crossed features.
def gbdt_feature_transform(model, X, encoder):
    """Encode the GBDT leaf indices of *X* with the fitted *encoder*.

    ``model.apply(X)`` yields one leaf index per tree per sample; the
    already-fitted encoder one-hot encodes them. The encoder must always
    be the one fitted on the training leaves.
    """
    return encoder.transform(model.apply(X))


# GBDT feature generator: its per-tree leaf indices become extra crossed
# features for the downstream LR model (fitted in GBDT_LR_Algorithm.fit).
gbdt = GradientBoostingRegressor(
    n_estimators=100,
    max_depth=5,
    subsample=0.8
)
# Module-level one-hot encoder for the GBDT leaf indices; it is fitted on
# the training-set leaves only, and unseen leaves are ignored at transform.
gbdt_encoder = OneHotEncoder(sparse_output=True, handle_unknown='ignore')


def data_process(dataset):
    """Build the combined LR input matrix for new samples.

    Cleans *dataset*, applies the module-level ``preprocessor``, generates
    one-hot GBDT leaf features with the module-level ``gbdt`` and
    ``gbdt_encoder``, and returns the horizontally stacked sparse matrix.

    NOTE(review): all three module-level transformers must already be
    fitted in this process (see ``GBDT_LR_Algorithm.fit``); this function
    does not load persisted models from disk.
    """
    cleaned = clean_data(dataset)

    # Base features from the fitted preprocessing FeatureUnion.
    processed = preprocessor.transform(cleaned)

    # Leaf-index features, via the shared helper for consistency.
    leaves = gbdt_feature_transform(gbdt, processed, gbdt_encoder)

    # hstack comes from the module-level scipy.sparse import; the original
    # re-imported it locally, shadowing that import for no benefit.
    return hstack([processed, leaves])


class GBDT_LR_Algorithm(AlgoBase):
    """GBDT + LR hybrid recommender (classic GBDT+LR feature-crossing).

    Base features (TF-IDF text, one-hot categoricals, scaled numerics) come
    from the module-level ``preprocessor``. A GradientBoostingRegressor is
    fitted on them; its per-tree leaf indices are one-hot encoded and
    appended as crossed features, and a LogisticRegression is trained on
    the combined sparse matrix, treating each rating value as a class.
    """

    def __init__(self, epoch=10000):
        AlgoBase.__init__(self)
        self.cleaned_data = None   # cleaned DataFrame cached by load_data()
        self.epoch = epoch         # reserved for batch slicing (currently unused)
        self.preprocessor = None   # fitted FeatureUnion
        self.gbdt = None           # fitted GradientBoostingRegressor
        self.gbdt_encoder = None   # OneHotEncoder fitted on training leaf indices
        self.lr = None             # fitted LogisticRegression

    def load_data(self):
        """Load the raw book-crossing CSV, clean it, and cache the result."""
        raw_data = pd.read_csv(
            '../ml-latest-small/book_crossing.csv',
            index_col=0  # first CSV column is the row index, not a feature
        )
        self.cleaned_data = clean_data(raw_data)

    def fit(self, trainset):
        """Fit preprocessor, GBDT, leaf encoder and LR; persist all four.

        NOTE(review): *trainset* is indexed like a pandas DataFrame below
        (column selection, train_test_split), not like a surprise Trainset;
        callers must pass the raw DataFrame.
        """
        AlgoBase.fit(self, trainset)

        # Feature columns kept for modelling; everything else is dropped.
        keep_features = [
            'age', 'book_title', 'book_author', 'year_of_publication',
            'publisher', 'Summary', 'Category', 'city', 'state', 'country'
        ]
        cleaned_data = clean_data(trainset)

        X = cleaned_data[keep_features]
        y = cleaned_data['rating']

        # Split BEFORE any fitting so the test fold never leaks into the
        # preprocessor, the GBDT, or the leaf encoder.
        X_train_raw, X_test_raw, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42)

        # Fit the base feature pipeline on the training split only.
        preprocessor.fit(X_train_raw)
        X_train_processed = preprocessor.transform(X_train_raw)
        X_test_processed = preprocessor.transform(X_test_raw)

        # Fit the GBDT, then fit the one-hot encoder on *training* leaves
        # only; test leaves are transformed with the same encoder.
        gbdt.fit(X_train_processed, y_train)
        train_leaf_ids = gbdt.apply(X_train_processed)
        gbdt_encoder.fit(train_leaf_ids)

        train_leaves = gbdt_encoder.transform(train_leaf_ids)
        test_leaves = gbdt_encoder.transform(gbdt.apply(X_test_processed))

        # Base features + leaf features, identical layout for both splits.
        X_train_combined = hstack([X_train_processed, train_leaves])
        X_test_combined = hstack([X_test_processed, test_leaves])

        # LR classifies each distinct rating value.
        lr = LogisticRegression(max_iter=1000, C=0.1)
        lr.fit(X_train_combined, y_train)
        print(f"Test Accuracy: {lr.score(X_test_combined, y_test):.4f}")

        # Keep fitted components on the instance and persist them to disk.
        self.preprocessor = preprocessor
        self.gbdt = gbdt
        self.gbdt_encoder = gbdt_encoder
        self.lr = lr

        joblib.dump(preprocessor, 'preprocessor.pkl')
        joblib.dump(gbdt, 'gbdt.pkl')
        joblib.dump(gbdt_encoder, 'gbdt_encoder.pkl')
        joblib.dump(lr, 'lr.pkl')

        return self

    def estimate(self, dataset):
        """Predict one rating per row of *dataset* (a raw DataFrame)."""
        # Load persisted components FIRST, so feature generation below uses
        # fitted objects even in a fresh process. (The original built the
        # features before loading — via possibly-unfitted module globals —
        # so the pickled models were never used for feature generation.)
        if self.preprocessor is None or self.gbdt is None or self.gbdt_encoder is None or self.lr is None:
            self.preprocessor = joblib.load('preprocessor.pkl')
            self.gbdt = joblib.load('gbdt.pkl')
            self.gbdt_encoder = joblib.load('gbdt_encoder.pkl')
            self.lr = joblib.load('lr.pkl')

        # Build the combined feature matrix with this instance's components
        # rather than the module-level globals used by data_process().
        cleaned = clean_data(dataset)
        processed = self.preprocessor.transform(cleaned)
        leaves = self.gbdt_encoder.transform(self.gbdt.apply(processed))
        combined = hstack([processed, leaves])

        return self.lr.predict(combined)

    def get_top_n_recommendations(self, dataset, user_id, top_n=5):
        """Return the top-N recommended items for *user_id*.

        Parameters:
        - dataset: DataFrame of user-item rows; must include 'user_id',
          'book_title' and the model's feature columns.
        - user_id: target user's ID.
        - top_n: number of recommendations to return (default 5).

        Returns:
        - DataFrame with 'book_title' and 'predicted_rating', sorted by
          predicted rating, highest first.
        """
        # Score only rows belonging to OTHER users, i.e. items the target
        # user has not rated yet.
        # .copy(): the original assigned a new column to a filtered view,
        # which triggers SettingWithCopyWarning and can silently fail.
        candidates = dataset[dataset['user_id'] != user_id].copy()

        if candidates.empty:
            raise ValueError(f"User {user_id} not found in the dataset.")

        candidates['predicted_rating'] = self.estimate(candidates)

        return (
            candidates.sort_values(by='predicted_rating', ascending=False)
            .head(top_n)[['book_title', 'predicted_rating']]
        )
