# -*- coding: utf-8 -*-

"""
qwen2.5-max:

算法              适用场景                    优点                                  缺点
ContentKNN       冷启动、内容丰富领域          冷启动友好，可解释性强                   对内容特征依赖高
User/Item CF     用户/物品兴趣相似性           简单易实现，适合中小规模数据              数据稀疏性问题严重
AutoRec          隐式反馈、高维稀疏矩阵         捕捉非线性关系，隐式反馈数据表现好         硬件资源要求高，训练时间长
RBM              隐式反馈、特征提取            捕捉隐含特征，适合小规模数据               训练复杂，调参难度大
Hybrid           复杂推荐需求、多源数据融合     综合多种算法优势，推荐效果好               实现复杂，维护成本高
SVD              显式反馈、矩阵稀疏性          简单高效，推荐准确                        冷启动问题，隐式反馈支持弱
GBDT-LR          特征工程复杂、CTR 预估        捕捉高阶特征，结构化数据表现优异            对非结构化数据支持弱

"""

# RecsBakeOff.py
from surprise import SVD, KNNBasic, SVDpp
from surprise import NormalPredictor
from surprise.model_selection import GridSearchCV

from Evaluator import Evaluator

import random
import numpy as np

from BookCrossingLoader import BookCrossingLoader
from ContentKNNAlgorithm import ContentKNNAlgorithm
from AutoRecAlgorithm import AutoRecAlgorithm
from RBMAlgorithm import RBMAlgorithm
from HybridAlgorithm import HybridAlgorithm

# Seed both NumPy's and Python's RNGs so repeated runs are reproducible.
np.random.seed(0)
random.seed(0)


def LoadBookCrossingData(epoch=0, max_rows=1000):
    """Load the Book-Crossing ratings dataset and item popularity rankings.

    Args:
        epoch: slice/epoch index forwarded to the loader (which rows to read).
        max_rows: maximum number of rating rows to load.

    Returns:
        Tuple of (loader instance, surprise dataset, popularity rankings).
    """
    bcl = BookCrossingLoader(
        item_path='../ml-latest-small/book_crossing.csv',
        epoch=epoch,
        max_rows=max_rows
    )
    # Fix: the original messages said "movie" ratings/popularity, but this
    # loader reads the Book-Crossing (book) dataset.
    print("Loading book ratings...")
    data = bcl.load_dataset()
    print("\nComputing book popularity ranks so we can measure novelty later...")
    rankings = bcl.get_popularity_ranks()

    return (bcl, data, rankings,)


def evaluate(epoch, max_rows=1000):
    """Train and compare several recommender algorithms on one data slice.

    Builds an Evaluator over the Book-Crossing slice selected by ``epoch``,
    registers content-based KNN, user/item KNN, SVD++, a grid-search-tuned
    SVD, AutoRec, RBM, and an AutoRec+SVD hybrid, then runs the bake-off.

    Args:
        epoch: data slice index passed through to the loader.
        max_rows: maximum number of rating rows to load.

    Side effects: prints progress, grid-search results, and the final
    evaluation table; no value is returned.
    """
    # Load up common data set for the recommender algorithms
    (bcl, evaluationData, rankings,) = LoadBookCrossingData(epoch, max_rows=max_rows)

    # Construct an Evaluator to, you know, evaluate them
    evaluator = Evaluator(evaluationData, rankings)

    # Content-based KNN; similarities are cached on disk to avoid recomputation
    contentKNN = ContentKNNAlgorithm(k=40, cache_path='similarity_cache.joblib')
    evaluator.AddAlgorithm(contentKNN, "ContentKNN")

    # User-based collaborative filtering (cosine similarity)
    UserKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})
    evaluator.AddAlgorithm(UserKNN, "User KNN")

    # Item-based collaborative filtering (cosine similarity)
    ItemKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': False})
    evaluator.AddAlgorithm(ItemKNN, "Item KNN")

    # SVD++ (implicit-feedback-aware matrix factorization)
    SVDPlusPlus = SVDpp()
    evaluator.AddAlgorithm(SVDPlusPlus, "SVD++")

    # SVD hyper-parameter tuning: 3-fold cross-validated grid search over
    # epochs / learning rate / latent factors, selecting by RMSE.
    print("Searching for best parameters...")
    param_grid = {'n_epochs': [20, 30], 'lr_all': [0.005, 0.010],
                  'n_factors': [50, 100]}
    gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
    gs.fit(evaluationData)
    # best RMSE score
    print("Best RMSE score attained: ", gs.best_score['rmse'])
    # combination of parameters that gave the best RMSE score
    print(gs.best_params['rmse'])
    params = gs.best_params['rmse']
    SVDtuned = SVD(n_epochs=params['n_epochs'], lr_all=params['lr_all'], n_factors=params['n_factors'])
    evaluator.AddAlgorithm(SVDtuned, "SVD - Tuned")

    # --- deep learning models ---
    # Autoencoder-based recommender
    AutoRec = AutoRecAlgorithm()
    evaluator.AddAlgorithm(AutoRec, "AutoRec")

    # Restricted Boltzmann Machine
    RBM = RBMAlgorithm(epochs=20)
    evaluator.AddAlgorithm(RBM, "RBM")

    # Weighted blend of AutoRec (0.7) and the tuned SVD (0.3)
    Hybrid = HybridAlgorithm([AutoRec, SVDtuned], [0.7, 0.3])
    evaluator.AddAlgorithm(Hybrid, "Hybrid")

    # Fight!
    # NOTE(review): the boolean argument's meaning (presumably "also compute
    # top-N metrics") is inferred — confirm against Evaluator.Evaluate.
    evaluator.Evaluate(True)


# NOTE(review): this loop executes at import time (no `if __name__ == "__main__":`
# guard) and performs 800 full evaluation passes, each including a grid search
# and deep-learning training — importing this module triggers all of it.
# Consider wrapping in a main guard.
for epoch in range(0, 800):
    print(f'start {epoch}')
    evaluate(epoch, max_rows=1000)
    print(f'end {epoch}')


def SimpleTopN():
    """Print sample top-N recommendations for one user using the Hybrid model.

    Loads a larger (20k-row) Book-Crossing slice, tunes an SVD via 3-fold
    grid search, blends it with AutoRec (weights 0.7 / 0.3), and prints
    sample recommendations for test subject 8.

    Side effects: prints progress and recommendations; no value is returned.
    """
    bcl = BookCrossingLoader(
        item_path='../ml-latest-small/book_crossing.csv',
        max_rows=20000
    )
    # Fix: the original messages said "movie" ratings/popularity, but this
    # loader reads the Book-Crossing (book) dataset.
    print("Loading book ratings...")
    data = bcl.load_dataset()
    print("\nComputing book popularity ranks so we can measure novelty later...")
    rankings = bcl.get_popularity_ranks()

    evaluator = Evaluator(data, rankings)

    # SVD hyper-parameter tuning (same grid as used in evaluate())
    print("Searching for best parameters...")
    param_grid = {'n_epochs': [20, 30], 'lr_all': [0.005, 0.010],
                  'n_factors': [50, 100]}
    gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
    gs.fit(data)
    # best RMSE score
    print("Best RMSE score attained: ", gs.best_score['rmse'])
    # combination of parameters that gave the best RMSE score
    print(gs.best_params['rmse'])
    params = gs.best_params['rmse']
    SVDtuned = SVD(n_epochs=params['n_epochs'], lr_all=params['lr_all'], n_factors=params['n_factors'])

    # Autoencoder component of the hybrid
    AutoRec = AutoRecAlgorithm()

    # Weighted blend of AutoRec (0.7) and the tuned SVD (0.3)
    Hybrid = HybridAlgorithm([AutoRec, SVDtuned], [0.7, 0.3])
    evaluator.AddAlgorithm(Hybrid, "Hybrid")

    evaluator.SampleTopNRecs(bcl, testSubject=8)


# NOTE(review): module-level side effect — this runs whenever the module is
# imported (and only after the 800-epoch loop above finishes). Consider moving
# under an `if __name__ == "__main__":` guard.
SimpleTopN()