import csv
import math

from numpy.linalg import solve
import numpy as np
from sklearn.metrics import mean_squared_error


class ExplicitMF_Pearsion():
    # NOTE(review): "Pearsion"/"persion" throughout this class look like
    # misspellings of "Pearson" (correlation); the names are kept as-is
    # because external callers may depend on them.
    def __init__(self,
                 ratings,
                 n_factors=40,
                 learning='sgd',
                 item_fact_reg=0.0,
                 user_fact_reg=0.0,
                 item_bias_reg=0.0,
                 user_bias_reg=0.0,
                 persion_user_reg=0.0,
                 persion_item_reg=0.0,
                 num_pearsion=5,
                 verbose=False):
        """
        Train a matrix factorization model to predict empty
        entries in a matrix. The terminology assumes a
        ratings matrix which is ~ user x item.

        Params
        ======
        ratings : (ndarray)
            User x Item matrix with corresponding ratings

        n_factors : (int)
            Number of latent factors to use in matrix
            factorization model

        learning : (str)
            Method of optimization. Options include
            'sgd' or 'als'.

        item_fact_reg : (float)
            Regularization term for item latent factors

        user_fact_reg : (float)
            Regularization term for user latent factors

        item_bias_reg : (float)
            Regularization term for item biases (currently unused —
            the bias handling below is commented out)

        user_bias_reg : (float)
            Regularization term for user biases (currently unused)

        persion_user_reg : (float)
            Weight of the user-similarity regularization term applied
            during the SGD update (0.0 disables it)

        persion_item_reg : (float)
            Weight of the item-similarity regularization term (stored
            but not used by the visible training code)

        num_pearsion : (int)
            Number of most-similar and least-similar neighbours taken
            from the similarity matrix in get_index()

        verbose : (bool)
            Whether or not to printout training progress
        """

        self.ratings = ratings
        # Debug output of the raw ratings matrix and its dimensions.
        print("ratings = {}".format(self.ratings))
        print("ratings.shape = {}".format(self.ratings.shape))
        self.n_users, self.n_items = ratings.shape
        self.n_factors = n_factors
        self.item_fact_reg = item_fact_reg
        self.user_fact_reg = user_fact_reg
        # Bias regularization is disabled in this version:
        # self.item_bias_reg = item_bias_reg
        # self.user_bias_reg = user_bias_reg
        self.persion_user_reg = persion_user_reg
        self.persion_item_reg = persion_item_reg
        self.num_pearsion = num_pearsion
        self.learning = learning
        if self.learning == 'sgd':
            # Row/column indices of the observed (non-zero) ratings —
            # these are the SGD training samples.
            self.sample_row, self.sample_col = self.ratings.nonzero()
            self.n_samples = len(self.sample_row)
        self._v = verbose

    def als_step(self,
                 latent_vectors,
                 fixed_vecs,
                 ratings,
                 _lambda,
                 type='user'):
        """
        One of the two ALS steps. Solve for the latent vectors
        specified by type.
        """
        if type == 'user':
            # Precompute
            YTY = fixed_vecs.T.dot(fixed_vecs)
            lambdaI = np.eye(YTY.shape[0]) * _lambda

            for u in range(latent_vectors.shape[0]):
                latent_vectors[u, :] = solve((YTY + lambdaI),
                                             ratings[u, :].dot(fixed_vecs))
        elif type == 'item':
            # Precompute
            XTX = fixed_vecs.T.dot(fixed_vecs)
            lambdaI = np.eye(XTX.shape[0]) * _lambda

            for i in range(latent_vectors.shape[0]):
                latent_vectors[i, :] = solve((XTX + lambdaI),
                                             ratings[:, i].T.dot(fixed_vecs))
        return latent_vectors

    def train(self, n_iter=10, learning_rate=0.1):
        """
        Train model for n_iter iterations from scratch.
        从头开始的n_iter迭代的训练模型。
        :param n_iter: 迭代次数
        :param learning_rate: 学习率，α
        """
        # initialize latent vectors
        self.user_vecs = np.random.normal(scale=1. / self.n_factors, \
                                          size=(self.n_users, self.n_factors))
        self.item_vecs = np.random.normal(scale=1. / self.n_factors,
                                          size=(self.n_items, self.n_factors))

        if self.learning == 'als':
            self.partial_train(n_iter)
        elif self.learning == 'sgd':
            # 初始化各个参数
            self.learning_rate = learning_rate
            # self.user_bias = np.zeros(self.n_users)
            # self.item_bias = np.zeros(self.n_items)
            # self.global_bias = np.mean(self.ratings[np.where(self.ratings != 0)])
            # self.persion_user_score = self.load_pearsion("pearsion_user_final.csv", into_o_1=True)
            self.persion_user_score = self.load_pearsion("furry_user_sim_Similarity.csv", into_o_1=True)
            # self.persion_item_score = self.load_pearsion("pearsion_item_final.csv", into_o_1=True)
            self.partial_train(n_iter)

    def partial_train(self, n_iter):
        """
        Train model for n_iter iterations. Can be
        called multiple times for further training.
        用于n_iter迭代的训练模型。可以
        多次打电话要求进一步培训。
        """
        # if self.persion_reg != 0:
        #     print("开始带相似度正则化的SGD算法的训练！")
        # else:
        #     print("开始SGD算法的训练！")

        # 迭代次数计数
        ctr = 1
        while ctr <= n_iter:
            if ctr % 10 == 0 and self._v:
                print('\tcurrent iteration: {}'.format(ctr))
            if self.learning == 'als':
                self.user_vecs = self.als_step(self.user_vecs,
                                               self.item_vecs,
                                               self.ratings,
                                               self.user_fact_reg,
                                               type='user')
                self.item_vecs = self.als_step(self.item_vecs,
                                               self.user_vecs,
                                               self.ratings,
                                               self.item_fact_reg,
                                               type='item')
            elif self.learning == 'sgd':
                self.training_indices = np.arange(self.n_samples)
                np.random.shuffle(self.training_indices)
                self.sgd()
            ctr += 1

    def sgd(self):
        """
        Core SGD update: one pass over the (pre-shuffled) observed ratings.

        For each observed (user, item) rating, takes a gradient step on the
        user and item latent vectors.  When ``persion_user_reg`` is non-zero,
        an extra similarity-based term pulls the user's vector toward its
        most-similar neighbours and pushes it away from the least-similar.
        """
        for idx in self.training_indices:
            # u is the user id and i the item id of this observed rating.
            u = self.sample_row[idx]
            i = self.sample_col[idx]
            # Model's current score for the (u, i) pair.
            prediction = self.predict(u, i)
            # Residual between the observed and predicted rating.
            e = (self.ratings[u, i] - prediction)  # error

            # Bias terms were disabled in this version:
            # bu = bu + lr * (e_ui - reg * bu)
            # bi = bi + lr * (e_ui - reg * bi)
            # self.user_bias[u] += self.learning_rate * \
            #                      (e - self.user_bias_reg * self.user_bias[u])
            # self.item_bias[i] += self.learning_rate * \
            #                      (e - self.item_bias_reg * self.item_bias[i])

            # Similarity regularization (only when a weight is configured).
            if self.persion_user_reg != 0:
                # Accumulate alpha * sum(s_uf * (u_u - u_f)) over the
                # selected neighbours f of user u.
                top_k_idx, bottom_k_idx = self.get_index(mode="user", u=u)
                count_cal_pes = 0
                # The first num_pearsion indices are the top-k (most
                # similar) neighbours and are added; the remaining
                # bottom-k (least similar) are subtracted.
                # NOTE(review): the top-k likely includes u itself, since
                # self-similarity is maximal — TODO confirm against the
                # loaded similarity matrix.
                for f in np.append(top_k_idx, bottom_k_idx):
                    if count_cal_pes == 0:
                        # First neighbour initialises the accumulator.
                        sum_user_pearsion = self.persion_user_score[u, f] * (
                                self.user_vecs[u, :] - self.user_vecs[f, :])
                    elif count_cal_pes < self.num_pearsion:
                        # Remaining most-similar neighbours: add.
                        sum_user_pearsion += self.persion_user_score[u, f] * (
                                self.user_vecs[u, :] - self.user_vecs[f, :])
                    else:
                        # Least-similar neighbours: subtract.
                        sum_user_pearsion -= self.persion_user_score[u, f] * (
                                self.user_vecs[u, :] - self.user_vecs[f, :])
                    count_cal_pes += 1

                # Scale the accumulated similarity gradient by its weight.
                self.pearsion_user_arr = self.persion_user_reg * sum_user_pearsion

                # Latent-factor update including the similarity term:
                # X_u = X_u + lr * (e_ui * Y_i - sim_term - user_reg * X_u)
                self.user_vecs[u, :] += self.learning_rate * \
                                        (e * self.item_vecs[i, :] - \
                                         self.pearsion_user_arr - \
                                         self.user_fact_reg * self.user_vecs[u, :])

                # NOTE(review): this item update uses the user vector that
                # was just updated above in the same step.
                self.item_vecs[i, :] += self.learning_rate * \
                                        (e * self.user_vecs[u, :] - \
                                         self.item_fact_reg * self.item_vecs[i, :])

            else:
                # Plain regularized MF updates (no similarity term).
                # X_u = X_u + lr * (e_ui * Y_i - user_reg * X_u)
                self.user_vecs[u, :] += self.learning_rate * \
                                        (e * self.item_vecs[i, :] - \
                                         self.user_fact_reg * self.user_vecs[u, :])
                # Y_i = Y_i + lr * (e_ui * X_u - item_reg * Y_i)
                self.item_vecs[i, :] += self.learning_rate * \
                                        (e * self.user_vecs[u, :] - \
                                         self.item_fact_reg * self.item_vecs[i, :])

    def predict(self, u, i):
        """ Single user and item prediction."""
        if self.learning == 'als':
            return self.user_vecs[u, :].dot(self.item_vecs[i, :].T)
        elif self.learning == 'sgd':
            # prediction = self.global_bias + self.user_bias[u] + self.item_bias[i]
            prediction = self.user_vecs[u, :].dot(self.item_vecs[i, :].T)
            return prediction

    def predict_all(self):
        """ Predict ratings for every user and item."""
        predictions = np.zeros((self.user_vecs.shape[0],
                                self.item_vecs.shape[0]))
        for u in range(self.user_vecs.shape[0]):
            for i in range(self.item_vecs.shape[0]):
                predictions[u, i] = self.predict(u, i)

        return predictions

    def get_mse(self, pred, actual):
        """
        计算mse
        :param pred: 预测
        :param actual: 实际
        :return: mse
        """
        # Ignore nonzero terms.
        pred = pred[actual.nonzero()].flatten()
        actual = actual[actual.nonzero()].flatten()
        return mean_squared_error(pred, actual)

    def calculate_learning_curve(self, iter_array, test, learning_rate=0.1):
        """
        Track MSE as a function of the number of training iterations.

        Params
        ======
        iter_array : (list)
            Iteration counts at which to record the learning curve,
            e.g. [1, 5, 10, 20].  Sorted in place.
        test : (2D ndarray)
            Testing dataset (assumed to be user x item).
        learning_rate : (float)
            SGD step size passed to train().

        Creates two attributes: ``train_mse`` and ``test_mse``, lists of
        MSE values aligned with ``iter_array``.
        """
        # NOTE: sorts the caller's list in place.
        iter_array.sort()
        self.train_mse = []
        self.test_mse = []
        prev_iter = 0
        for position, n_iter in enumerate(iter_array):
            if self._v:
                print('Iteration: {}'.format(n_iter))
            # First checkpoint trains from scratch; later checkpoints only
            # run the extra iterations needed to reach the next count.
            if position == 0:
                self.train(n_iter - prev_iter, learning_rate)
            else:
                self.partial_train(n_iter - prev_iter)

            predictions = self.predict_all()

            self.train_mse.append(self.get_mse(predictions, self.ratings))
            self.test_mse.append(self.get_mse(predictions, test))
            if self._v:
                print('Train mse: ' + str(self.train_mse[-1]))
                print('Test mse: ' + str(self.test_mse[-1]))
            prev_iter = n_iter

    def load_pearsion(self, path, into_o_1=False):
        """
        Load a flattened pairwise similarity matrix from a CSV file.

        The file is expected to have a header row followed by one data row
        per (entity, entity) pair, with the similarity score in the third
        column.  The number of data rows must be a perfect square so the
        flat score list can be reshaped into a square matrix.

        :param path: path to the similarity CSV file
        :param into_o_1: if True, rescale scores from [-1, 1] into [0, 1]
            via (s + 1) / 2 (Pearson correlations into unit range)
        :return: (n, n) ndarray of similarity scores
        """
        with open(path) as csvfile:
            csv_reader = csv.reader(csvfile)
            next(csv_reader)  # skip the header row
            # Only the third column (the score) is needed; the old code
            # converted every column to float unnecessarily.
            scores = [float(row[2]) for row in csv_reader]

        scores_arr = np.array(scores)
        if into_o_1:
            # Map Pearson scores from [-1, 1] into [0, 1].
            scores_arr = (scores_arr + 1.0) / 2.0

        # Reshape the flat pair list into a square similarity matrix.
        side = int(math.sqrt(scores_arr.shape[0]))
        return scores_arr.reshape(side, side)

    def get_index(self, mode, u):
        """
        根据给出的 userid 获取 N 个最大和最小的值得下标
        :param u: 用户id
        :return: N 个最大和最小的值的下标
        """
        u = int(u)
        if mode == "user":
            # top_k = 3
            top_k_idx = self.persion_user_score[u].argsort()[::-1][0:self.num_pearsion]

            bottom_k_idx = self.persion_user_score[u].argsort()[::-1][-self.num_pearsion:]
        elif mode == "item":
            # top_k = 3
            top_k_idx = self.persion_item_score[u].argsort()[::-1][0:self.num_pearsion]

            bottom_k_idx = self.persion_item_score[u].argsort()[::-1][-self.num_pearsion:]
        else:
            print("mode 设置错误，当前设置如下：")
            print("mode = {}".format(mode))
            return [0] * (self.num_pearsion * 2)

        return top_k_idx, np.sort(bottom_k_idx)

    def add_sim_by_id(self, u):
        sum_sim_byid = 0
        for i in range(self.persion_score.shape[0]):
            sum_sim_byid += self.persion_score[u][i]
        return sum_sim_byid
