
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from measure import measure_method

# Load the pre-split user-item rating matrices (rows = users, columns =
# items, 0 = unrated).  NOTE(review): hard-coded absolute Windows paths —
# consider making these configurable / relative to the project root.
user_rating_train = pd.read_csv('C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\user-rating_train.csv', index_col=0)
user_rating_test = pd.read_csv('C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\user-rating_test.csv', index_col=0)

# train, test = ML_data_preproccessing.train_test_split(user_rating)
ratings_index = user_rating_train.index
ratings_col = user_rating_train.columns
# Re-wrap both frames onto the TRAINING matrix's row/column labels so that
# train and test are aligned on the same user/item axes.
# NOTE(review): the test frame is forced onto the training index/columns —
# presumably both CSVs share the same users and items; verify, since any
# label present only in the test CSV would be silently dropped/NaN-filled.
train = pd.DataFrame(user_rating_train,
                         columns=ratings_col,
                         index= ratings_index)
test = pd.DataFrame(user_rating_test,
                        columns=ratings_col,
                        index= ratings_index)

class Matrix_Factorization_BGD(object):
    """Matrix factorization R ~= P.dot(Q.T) trained with batch gradient descent.

    Minimizes 0.5 * ||A * (P.Q^T - R)||_F^2 (plus optional L2 regularization
    on P and Q), where A is the 0/1 indicator mask of observed (nonzero)
    ratings and * is the elementwise (Hadamard) product.

    Attributes:
        K (int): latent factor dimension.
        alpha (float): learning rate; should be roughly within 1e-5 .. 1e-1.
        lamuda (float): regularization coefficient, applied to both P and Q.
        epoch (int): number of full-batch gradient steps.
        regularization (bool): whether to add the L2 penalty to the gradient.
        random_state (int): seed for the factor initialization.
        R (ndarray): user-item rating matrix, shape (M, N); 0 = unobserved.
        P (ndarray): user latent factors, shape (M, K).
        Q (ndarray): item latent factors, shape (N, K).
        A (ndarray): 0/1 mask of observed entries, same shape as R.
        r_index (tuple): (row, col) index arrays of observed entries.
        r (ndarray): observed ratings flattened into a 1-D array.
        length (int): number of observed ratings.
        sq_loss (ndarray): masked residual matrix A * (P.Q^T - R).
    """

    def __init__(self, K=10, alpha=0.00002, lamuda=0.01, epoch=10,
                 regularization=True, random_state=100, test=None):
        """Store hyperparameters; model state is created later by fit().

        Note: `test` is stored but never read by any method of this class
        (the hold-out frame is passed to start() directly).  The default
        used to be a module-level global DataFrame, which made the class
        unusable outside that one script; None is a safe neutral default.
        """
        self.R = None
        self.K = K
        self.A = None
        self.P = None
        self.Q = None
        self.r_index = None
        self.r = None
        self.length = None
        self.sq_loss = None
        self.alpha = alpha
        self.lamuda = lamuda
        self.epoch = epoch
        self.regularization = regularization
        self.random_state = random_state
        self.test = test

    def fit(self, R):
        """Initialize factors and the observation mask from DataFrame `R`.

        Args:
            R (pd.DataFrame): user-item rating matrix; 0 means unrated.
        """
        np.random.seed(self.random_state)
        self.R = R.values
        M, N = self.R.shape
        self.P = np.random.rand(M, self.K)
        self.Q = np.random.rand(N, self.K)

        self.r_index = self.R.nonzero()
        self.r = self.R[self.r_index[0], self.r_index[1]]
        self.length = len(self.r)
        # BUG FIX: the mask was previously allocated as a hard-coded
        # np.zeros([10000, 10000]) and filled with a Python loop over every
        # observed entry.  That breaks with a shape mismatch in
        # _comp_descent (A is multiplied elementwise against an M x N
        # matrix) whenever R is not exactly 10000 x 10000, and wastes
        # ~800 MB even when it is.  Build the 0/1 indicator at R's own
        # shape, vectorized.
        self.A = (self.R != 0).astype(float)

    def _comp_frob_2(self, X):
        """Return the squared Frobenius norm of numpy matrix `X`.

        Only nonzero entries are gathered before squaring, which gives the
        same result as (X ** 2).sum() but mirrors the sparse access pattern
        used elsewhere in the class.
        """
        x_index = X.nonzero()
        x = X[x_index[0], x_index[1]]
        return x.dot(x.T)

    def _comp_descent(self):
        """Compute the full-batch gradients of the loss w.r.t. P and Q.

        Returns:
            (descent_P, descent_Q): gradient matrices with the same shapes
            as P (M, K) and Q (N, K) respectively.
        """
        # Residual restricted to observed entries via the Hadamard mask.
        self.sq_loss = (self.P.dot(self.Q.T) - self.R) * self.A
        if self.regularization:
            # d/dP [0.5*||A*(PQ^T - R)||^2 + lamuda*(||P||^2 + ||Q||^2)]
            descent_P = self.sq_loss.dot(self.Q) + self.lamuda * self.P * 2
            descent_Q = (self.sq_loss.T).dot(self.P) + self.lamuda * self.Q * 2
        else:
            descent_P = self.sq_loss.dot(self.Q)
            descent_Q = (self.sq_loss.T).dot(self.P)

        return descent_P, descent_Q

    def _update(self, descent_P, descent_Q):
        """Return (P_new, Q_new) after one gradient step of size alpha."""
        P_new = self.P - self.alpha * descent_P
        Q_new = self.Q - self.alpha * descent_Q

        return P_new, Q_new

    def _estimate_r_hat(self):
        """Return the model's predictions at the observed (train) positions."""
        r_hat = self.P.dot(self.Q.T)[self.r_index[0], self.r_index[1]]

        return r_hat

    def start(self, test):
        """Run `epoch` gradient steps, tracking loss and hold-out RMSE.

        Args:
            test (pd.DataFrame): hold-out rating matrix aligned with the
                training matrix passed to fit(); 0 means unrated.

        Returns:
            ndarray: the reconstructed rating matrix P.dot(Q.T).

        Side effects: prints per-epoch diagnostics and shows two matplotlib
        plots (loss curve and hold-out RMSE curve).
        """
        # The hold-out positions never change; compute them once.
        non_index = test.values.nonzero()
        actual = test.values[non_index[0], non_index[1]]

        epoch_num = 1
        f_x_lst = []
        r_x_lst = []
        while epoch_num <= self.epoch:
            descent_P, descent_Q = self._comp_descent()
            P_new, Q_new = self._update(descent_P, descent_Q)
            self.P = P_new
            self.Q = Q_new

            # NOTE: sq_loss was computed from the PRE-update P/Q inside
            # _comp_descent, so the printed loss lags one step behind the
            # factors it is reported with.  The lamuda penalty is also added
            # here regardless of self.regularization.  Both quirks are kept
            # for parity with the original training log.
            loss_func = 0.5 * self._comp_frob_2(self.sq_loss) + self.lamuda * (
                        self._comp_frob_2(self.P) + self._comp_frob_2(self.Q))
            print('The loss func is %s=================Epoch:%s' % (loss_func, epoch_num))
            f_x_lst.append([epoch_num, loss_func])

            pred_MF = self.P.dot(self.Q.T)[non_index[0], non_index[1]]
            in_rmse = measure_method.comp_rmse(pred_MF, actual)
            print('The in_rmse is %s=================Epoch:%s' % (in_rmse, epoch_num))
            r_x_lst.append([epoch_num, in_rmse])

            epoch_num += 1

        f_x_lst = np.array(f_x_lst)
        plt.plot(f_x_lst[:, 0], f_x_lst[:, 1])
        plt.xlabel('epoch_num')
        plt.ylabel('loss function for the epoch')
        plt.show()

        r_x_lst = np.array(r_x_lst)
        plt.plot(r_x_lst[:, 0], r_x_lst[:, 1])
        plt.xlabel('epoch_num')
        plt.ylabel('in_rmse for the epoch')
        plt.show()

        R_hat = self.P.dot(self.Q.T)
        return R_hat