"""
协同过滤
"""

import os
import numpy as np
import pandas as pd


# Path to the MovieLens "latest-small" ratings CSV (userId, movieId, rating, timestamp).
DATA_PATH = './ml-latest-small/ratings.csv'
# Directory where the pickled ratings DataFrame is cached between runs.
CACHE_DIR = './cache'


def load_data(filepath):
    """
    Load the ratings dataset, serving from a local pickle cache when available.

    :param filepath: path to the ratings CSV; its first three columns must be
                     userId, movieId, rating
    :return: pandas.DataFrame with columns userId, movieId, rating
    """
    cache_path = os.path.join(CACHE_DIR, 'ratings.cache')
    print('开始加载数据集...')
    if os.path.exists(cache_path):  # cache hit: skip CSV parsing entirely
        print('缓存加载中...')
        ratings = pd.read_pickle(cache_path)
        print('从缓存加载数据集完成')
    else:
        print('新数据加载中...')
        # Narrow dtypes keep the in-memory footprint small.
        dtype = {'userId': np.int32, 'movieId': np.int32, 'rating': np.float32}
        # Only the first three columns are needed (timestamp is dropped).
        ratings = pd.read_csv(filepath, dtype=dtype, usecols=range(3))
        # BUGFIX: the cache directory may not exist yet; to_pickle() would
        # raise FileNotFoundError without this.
        os.makedirs(CACHE_DIR, exist_ok=True)
        ratings.to_pickle(cache_path)
        print('从新数据加载数据集完成')
    return ratings


class BaselineCFBySGD(object):
    """
    Baseline (bias-only) collaborative filtering trained with stochastic
    gradient descent (SGD).

    Rating model: r_ui = mu + bu[u] + bi[i], where mu is the global mean
    rating and bu / bi are per-user and per-item bias terms learned from
    the training data.
    """
    def __init__(self, epochs, alpha, reg, columns=('user_id', 'item_id', 'rating')):
        """
        :param epochs: maximum number of SGD passes over the dataset
        :param alpha: learning rate
        :param reg: regularization coefficient for the bias terms
        :param columns: names of the (user, item, rating) columns in the dataset
        """
        self.epochs = epochs
        self.alpha = alpha
        self.reg = reg
        self.columns = columns

    def fit(self, dataset):
        """
        Fit the bias terms on a ratings dataset.

        :param dataset: DataFrame whose three columns are (user, item, rating),
                        named as in self.columns
        :return: None
        """
        self.dataset = dataset
        # Per-user lists of rated items and the corresponding ratings.
        self.user_ratings = dataset.groupby(self.columns[0]).agg([list])[[self.columns[1], self.columns[2]]]
        # Per-item lists of rating users and the corresponding ratings.
        self.item_ratings = dataset.groupby(self.columns[1]).agg([list])[[self.columns[0], self.columns[2]]]
        # Global mean rating (mu in the model).
        self.mean_rating = self.dataset[self.columns[2]].mean()
        # Learn the bias terms with SGD.
        self.bu, self.bi = self.sgd()

    def sgd(self):
        """
        Optimize the bu / bi bias terms with stochastic gradient descent.

        :return: (bu, bi) — dicts mapping user_id -> bias and item_id -> bias
        """
        # Start all biases at zero so the initial prediction is the global mean.
        bu = dict(zip(self.user_ratings.index, np.zeros(len(self.user_ratings))))
        bi = dict(zip(self.item_ratings.index, np.zeros(len(self.item_ratings))))
        for i in range(self.epochs):
            print('iter %d' % i)
            for user_id, item_id, rating in self.dataset.itertuples(index=False):
                # Gradient step on the regularized squared prediction error.
                error = rating - (self.mean_rating + bu[user_id] + bi[item_id])
                bu[user_id] += self.alpha * (error - self.reg * bu[user_id])
                bi[item_id] += self.alpha * (error - self.reg * bi[item_id])
        return bu, bi

    def predict(self, user_id, item_id):
        """
        Predict the rating of user_id for item_id.

        Unknown users/items (cold start) fall back to a zero bias, so the
        prediction degrades gracefully towards the global mean instead of
        raising a KeyError.

        :param user_id: user identifier
        :param item_id: item identifier
        :return: predicted rating (float)
        """
        return self.mean_rating + self.bu.get(user_id, 0.0) + self.bi.get(item_id, 0.0)


class BaselineCFByALS(object):
    """
    Baseline (bias-only) collaborative filtering trained with alternating
    least squares (ALS).

    Rating model: r_ui = mu + bu[u] + bi[i]. Each epoch solves for bu in
    closed form with bi held fixed, then for bi with bu held fixed.
    """
    def __init__(self, epochs, reg_bu, reg_bi, columns=('user_id', 'item_id', 'rating')):
        """
        :param epochs: maximum number of ALS iterations
        :param reg_bu: regularization coefficient for the user biases
        :param reg_bi: regularization coefficient for the item biases
        :param columns: names of the (user, item, rating) columns in the dataset
        """
        self.epochs = epochs
        self.reg_bu = reg_bu
        self.reg_bi = reg_bi
        self.columns = columns

    def fit(self, dataset):
        """
        Fit the bias terms on a ratings dataset.

        :param dataset: DataFrame whose three columns are (user, item, rating),
                        named as in self.columns
        :return: None
        """
        self.dataset = dataset
        # Per-user lists of rated items and the corresponding ratings.
        self.user_ratings = dataset.groupby(self.columns[0]).agg([list])[[self.columns[1], self.columns[2]]]
        # Per-item lists of rating users and the corresponding ratings.
        self.item_ratings = dataset.groupby(self.columns[1]).agg([list])[[self.columns[0], self.columns[2]]]
        # Global mean rating (mu in the model).
        self.mean_rating = self.dataset[self.columns[2]].mean()
        # Learn the bias terms with ALS.
        self.bu, self.bi = self.als()

    def als(self):
        """
        Optimize the bu / bi bias terms with alternating least squares.

        :return: (bu, bi) — dicts mapping user_id -> bias and item_id -> bias
        """
        # Start all biases at zero so the initial prediction is the global mean.
        bu = dict(zip(self.user_ratings.index, np.zeros(len(self.user_ratings))))
        bi = dict(zip(self.item_ratings.index, np.zeros(len(self.item_ratings))))
        for i in range(self.epochs):
            print('iter %d' % i)
            # Closed-form update for each user bias with item biases fixed.
            for user_id, item_ids, ratings in self.user_ratings.itertuples(index=True):
                _sum = 0
                for item_id, rating in zip(item_ids, ratings):
                    _sum += rating - self.mean_rating - bi[item_id]
                bu[user_id] = _sum / (self.reg_bu + len(item_ids))
            # Closed-form update for each item bias with user biases fixed.
            for item_id, user_ids, ratings in self.item_ratings.itertuples(index=True):
                _sum = 0
                for user_id, rating in zip(user_ids, ratings):
                    _sum += rating - self.mean_rating - bu[user_id]
                bi[item_id] = _sum / (self.reg_bi + len(user_ids))
        return bu, bi

    def predict(self, user_id, item_id):
        """
        Predict the rating of user_id for item_id.

        Unknown users/items (cold start) fall back to a zero bias, so the
        prediction degrades gracefully towards the global mean instead of
        raising a KeyError.

        :param user_id: user identifier
        :param item_id: item identifier
        :return: predicted rating (float)
        """
        return self.mean_rating + self.bu.get(user_id, 0.0) + self.bi.get(item_id, 0.0)


if __name__ == '__main__':
    # Train both baseline models on the same ratings data and compare
    # their prediction for one (user, item) pair.
    ratings = load_data(DATA_PATH)
    movielens_columns = ('userId', 'movieId', 'rating')
    sgd_model = BaselineCFBySGD(20, 0.1, 0.1, movielens_columns)
    als_model = BaselineCFByALS(20, 0.1, 0.1, movielens_columns)
    sgd_model.fit(ratings)
    als_model.fit(ratings)
    target_user, target_item = 1, 1
    print(sgd_model.predict(target_user, target_item))
    print(als_model.predict(target_user, target_item))
