# coding: utf-8

# 将所有特征串联起来，构成RS_Train.csv
# RS_Test.csv
# 为最后推荐系统做准备
from __future__ import division

import cPickle
import numpy as np
import scipy.io as sio
import scipy.sparse as ss
from numpy.random import random
from collections import defaultdict


class RecommonderSystem:
    """Combines several recommendation signals computed by earlier pipeline
    steps: user-based CF, item-based CF, model-based CF (SVD), attribute
    similarities, social features and event popularity.

    All inputs are pre-computed files loaded in __init__.
    """

    def __init__(self):
        # Load pre-computed data and train the SVD model.

        # Re-mapped user and event indices (original id -> dense index).
        self.userIndex = cPickle.load(open("PE_userIndex.pkl", 'rb'))
        self.eventIndex = cPickle.load(open("PE_eventIndex.pkl", 'rb'))
        self.n_users = len(self.userIndex)
        self.n_items = len(self.eventIndex)

        # User-event preference matrix R.
        # NOTE: train_SVD re-reads the ratings from the raw training file
        # because the two code paths expect different input formats.
        self.userEventScores = sio.mmread("PE_userEventScores").todense()

        # Inverted indices:
        # events each user interacted with
        self.itemsForUser = cPickle.load(open("PE_eventsForUser.pkl", 'rb'))
        # users that interacted with each event
        self.usersForItem = cPickle.load(open("PE_usersForEvent.pkl", 'rb'))

        # Initialise and train the model-based CF (biased matrix factorisation).
        self.init_SVD()
        self.train_SVD(trainfile="train.csv")

        # User-user similarity derived from user profile attributes.
        self.userSimMatrix = sio.mmread("US_userSimMatrix").todense()

        # Event-event similarities from event attributes and event content.
        self.eventPropSim = sio.mmread("EV_eventPropSim").todense()
        self.eventContSim = sio.mmread("EV_eventContSim").todense()

        # Number of friends per user.
        self.numFriends = sio.mmread("UF_numFriends")
        # Per-friend event-score influence on each user.
        self.userFriends = sio.mmread("UF_userFriends").todense()

        # Event popularity (by attendance).
        self.eventPopularity = sio.mmread("EA_eventPopularity").todense()

    def init_SVD(self, K=20):
        """Initialise the latent-factor model parameters.

        K -- number of latent factors.
        """
        self.K = K

        # Bias terms: user bias as a column vector, item bias as a row vector.
        self.bu = np.zeros((self.n_users, 1))
        self.bi = np.zeros((1, self.n_items))

        # Latent factor matrices: P is (n_users x K), Q is (K x n_items).
        self.P = np.random.rand(self.n_users, self.K)
        self.Q = np.random.rand(self.K, self.n_items)

    def train_SVD(self, trainfile='train.csv', steps=1000000, gamma=0.04, Lambda=0.15):
        """Train the biased matrix-factorisation model with SGD.

        trainfile -- CSV with header; columns 0,1,4 = user, event, interested
        steps     -- number of SGD updates (one random cell per step)
        gamma     -- learning rate
        Lambda    -- L2 regularisation strength
        """
        # Ratings are re-read from the raw training file so this method keeps
        # the same input interface as the original pipeline.
        print("SVD Train...")
        ftrain = open(trainfile, 'r')
        ftrain.readline()  # skip the header row
        self.mu = 0.0
        n_records = 0
        # Temporary dense user-item matrix R (same content as
        # userEventScores); only needed during training.
        R = np.zeros((self.n_users, self.n_items))

        for line in ftrain:
            cols = line.strip().split(",")
            u = self.userIndex[cols[0]]   # user index
            i = self.eventIndex[cols[1]]  # event index
            R[u, i] = int(cols[4])        # "interested" label
            self.mu += R[u, i]
            n_records += 1

        ftrain.close()
        self.mu /= n_records  # global mean rating

        # SGD over randomly sampled (user, event) cells.
        for _ in range(steps):
            uidIndex = np.random.randint(0, self.n_users)
            eventIndex = np.random.randint(0, self.n_items)

            # Residual between the observed value and the current prediction.
            eui = R[uidIndex, eventIndex] - self.pred_SVD(uidIndex, eventIndex)

            # Gradients of the L2-regularised squared error w.r.t. each parameter.
            gradient_bu = Lambda * self.bu[uidIndex, 0] - eui
            gradient_bi = Lambda * self.bi[0, eventIndex] - eui
            gradient_puk = Lambda * self.P[uidIndex, :] - eui * self.Q[:, eventIndex]
            gradient_qki = Lambda * self.Q[:, eventIndex] - eui * self.P[uidIndex, :]

            # Gradient-descent parameter updates.
            self.bu[uidIndex, 0] -= gamma * gradient_bu
            self.bi[0, eventIndex] -= gamma * gradient_bi
            self.P[uidIndex, :] -= gamma * gradient_puk
            self.Q[:, eventIndex] -= gamma * gradient_qki
        # BUGFIX: log completion after training, not before the loop.
        print("SVD trained")

    def pred_SVD(self, uid, i_id):
        """Predict user uid's score for item i_id, clipped to [0, 1]."""
        ans = self.mu + self.bi[0, i_id] + self.bu[uid, 0] + np.dot(self.P[uid, :], self.Q[:, i_id])

        # Clip the prediction into the valid score range.
        if ans > 1:
            return 1
        elif ans < 0:
            return 0
        return ans

    def sim_cal_UserCF(self, uid1, uid2):
        """Pearson-style similarity between two users based on event scores.

        Each user's scores are centred on their mean over *all* their events
        (not just the shared ones). Returns (similarity, mean score of uid2);
        the mean is 0 when the users share no events.
        """
        similarity = 0.0
        uidScoresMean2 = 0

        eventsForUser1 = self.itemsForUser[uid1]
        eventsForUser2 = self.itemsForUser[uid2]
        sameEvents = eventsForUser1 & eventsForUser2
        if len(sameEvents) > 0:
            # Mean score of each user over every event they interacted with.
            uidScoresMean1 = np.mean(self.userEventScores[uid1, list(eventsForUser1)])
            uidScoresMean2 = np.mean(self.userEventScores[uid2, list(eventsForUser2)])
            sum_fenzi = 0         # numerator: sum of centred-score products
            sum_fenmu_user1 = 0   # denominator term for user 1
            sum_fenmu_user2 = 0   # denominator term for user 2
            for i in sameEvents:
                d1 = self.userEventScores[uid1, i] - uidScoresMean1
                d2 = self.userEventScores[uid2, i] - uidScoresMean2
                sum_fenzi += d1 * d2
                sum_fenmu_user1 += d1 ** 2
                sum_fenmu_user2 += d2 ** 2
            if sum_fenmu_user1 != 0 and sum_fenmu_user2 != 0:
                similarity = sum_fenzi / (np.sqrt(sum_fenmu_user1) * np.sqrt(sum_fenmu_user2))

        return similarity, uidScoresMean2

    def userCFReco(self, userId, eventId):
        """User-based CF recommendation score for (userId, eventId).

        For every other user v who interacted with the event, compute the
        similarity s(u, v); neighbours above the threshold contribute their
        mean-centred score for the event, weighted by s, to a weighted
        average added to u's own mean score.
        """
        ans = 0.0
        threshold = 0.5  # minimum similarity for a neighbour to count

        uidIndex = self.userIndex[userId]
        eventIndex = self.eventIndex[eventId]
        usersForEvent = self.usersForItem[eventIndex]

        sum_fenzi = 0  # weighted sum of centred neighbour scores
        sum_fenmu = 0  # sum of similarity weights
        if len(usersForEvent) > 1:
            for otherIdIndex in usersForEvent:
                if otherIdIndex != uidIndex:
                    sim, otherMean = self.sim_cal_UserCF(uidIndex, otherIdIndex)
                    if sim > threshold:
                        sum_fenzi += sim * (self.userEventScores[otherIdIndex, eventIndex] - otherMean)
                        sum_fenmu += sim
            if sum_fenmu != 0:
                # Target user's mean score over all events they interacted with.
                eventsForUser1 = self.itemsForUser[uidIndex]
                uidScoresMean1 = np.mean(self.userEventScores[uidIndex, list(eventsForUser1)])
                ans = uidScoresMean1 + sum_fenzi / sum_fenmu

        return ans

    def sim_cal_ItemCF(self, i_id1, i_id2):
        """Pearson-style similarity between two events based on user scores.

        Returns (similarity, mean score of i_id2); both are 0 when the
        events share no users.
        """
        similarity = 0.0
        # BUGFIX: initialise so the return value is defined even when the two
        # events have no user in common (was an unbound-local NameError).
        eventScoresMean2 = 0.0

        usersForEvent1 = self.usersForItem[i_id1]
        usersForEvent2 = self.usersForItem[i_id2]
        sameUsers = usersForEvent1 & usersForEvent2
        if len(sameUsers) > 0:
            # Mean score of each event over every user that interacted with it.
            eventScoresMean1 = np.mean(self.userEventScores[list(usersForEvent1), i_id1])
            eventScoresMean2 = np.mean(self.userEventScores[list(usersForEvent2), i_id2])
            sum_fenzi = 0          # numerator: sum of centred-score products
            sum_fenmu_event1 = 0   # denominator term for event 1
            sum_fenmu_event2 = 0   # denominator term for event 2
            for u in sameUsers:
                d1 = self.userEventScores[u, i_id1] - eventScoresMean1
                d2 = self.userEventScores[u, i_id2] - eventScoresMean2
                sum_fenzi += d1 * d2
                sum_fenmu_event1 += d1 ** 2
                sum_fenmu_event2 += d2 ** 2
            if sum_fenmu_event1 != 0 and sum_fenmu_event2 != 0:
                similarity = sum_fenzi / (np.sqrt(sum_fenmu_event1) * np.sqrt(sum_fenmu_event2))

        return similarity, eventScoresMean2

    def eventCFReco(self, userId, eventId):
        """Item-based CF recommendation score for (userId, eventId).

        For every event j the user interacted with, compute the similarity
        s(i, j); events above the threshold contribute the user's
        mean-centred score for j, weighted by s, to a weighted average added
        to event i's mean score.
        """
        ans = 0.0
        threshold = 0.2  # minimum similarity for a neighbour event to count

        uidIndex = self.userIndex[userId]
        eventIndex = self.eventIndex[eventId]
        eventsForUser = self.itemsForUser[uidIndex]

        sum_fenzi = 0  # weighted sum of centred user scores
        sum_fenmu = 0  # sum of similarity weights
        if len(eventsForUser) > 1:
            for otherEventIndex in eventsForUser:
                if otherEventIndex != eventIndex:
                    sim, otherMean = self.sim_cal_ItemCF(eventIndex, otherEventIndex)
                    if sim > threshold:
                        sum_fenzi += sim * (self.userEventScores[uidIndex, otherEventIndex] - otherMean)
                        sum_fenmu += sim
            if sum_fenmu != 0:
                # BUGFIX: centre on the *target* event's mean score; the
                # original indexed with the leaked loop variable
                # otherEventIndex instead of eventIndex.
                usersForEvent1 = self.usersForItem[eventIndex]
                eventScoresMean1 = np.mean(self.userEventScores[list(usersForEvent1), eventIndex])
                ans = eventScoresMean1 + sum_fenzi / sum_fenmu

        return ans

    def svdCFReco(self, userId, eventId):
        """Model-based CF (SVD/LFM) recommendation score for (userId, eventId)."""
        u = self.userIndex[userId]
        i = self.eventIndex[eventId]

        return self.pred_SVD(u, i)

    def userReco(self, userId, eventId):
        """Like user-based CF, but user similarity comes from user profile
        attributes (userSimMatrix) instead of co-rated events.

        Relies on userSimMatrix/userEventScores being np.matrix (from
        .todense()), so * is matrix multiplication: (1 x n) x (n x 1).
        """
        i = self.userIndex[userId]
        j = self.eventIndex[eventId]

        vs = self.userEventScores[:, j]   # all users' scores for this event
        sims = self.userSimMatrix[i, :]   # this user's similarity to everyone

        prod = sims * vs  # similarity-weighted sum of scores

        try:
            # Subtract the user's own score so only neighbours contribute.
            return prod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            return 0

    def eventReco(self, userId, eventId):
        """Like item-based CF, but event similarity comes from event
        attributes (eventPropSim) and event content (eventContSim).

        Returns a (property-based, content-based) score pair.
        """
        i = self.userIndex[userId]
        j = self.eventIndex[eventId]
        js = self.userEventScores[i, :]   # this user's scores for all events
        psim = self.eventPropSim[:, j]    # property similarity to event j
        csim = self.eventContSim[:, j]    # content similarity to event j
        pprod = js * psim  # matrix product: similarity-weighted score sums
        cprod = js * csim

        pscore = 0
        cscore = 0
        try:
            # Subtract the user's own score for the target event.
            pscore = pprod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            pass
        try:
            cscore = cprod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            pass
        return pscore, cscore

    def userPop(self, userId):
        """Social activity proxy: the user's friend count.

        Users with many friends may be more inclined to attend events.
        Returns 0 for unknown users.
        """
        # dict.has_key is Python-2-only; `in` is equivalent and portable.
        if userId in self.userIndex:
            i = self.userIndex[userId]
            try:
                return self.numFriends[0, i]
            except IndexError:
                return 0
        else:
            return 0

    def friendInfluence(self, userId):
        """Influence of the user's friends: average of the friend-score row.

        A user whose friends actively attend events may be influenced to do
        the same.
        """
        nusers = np.shape(self.userFriends)[1]
        i = self.userIndex[userId]
        # BUGFIX: average the whole row. The original did sum(axis=0) on a
        # 1 x n row (a no-op) and then took element [0, 0], which returned
        # only the first friend's score divided by nusers.
        return self.userFriends[i, :].sum() / nusers

    def eventPop(self, eventId):
        """Event popularity, measured by the number of participants."""
        i = self.eventIndex[eventId]
        return self.eventPopularity[i, 0]


def generateRSData(RS, train=True, header=True):
    """Concatenate all recommendation signals into one feature CSV.

    Joins user-based CF, item-based CF, SVD CF, attribute-similarity scores
    and the popularity/social features into RS_train.csv / RS_test.csv for
    the downstream classifier.

    RS     -- a trained RecommonderSystem (any object exposing the same API)
    train  -- read train.csv and append the two label columns; else test.csv
    header -- write a header row with the output column names
    """
    fn = "train.csv" if train else "test.csv"
    # BUGFIX: open in text mode; the original used 'rb'/'wb' while reading
    # and writing str, which breaks under Python 3.
    fin = open(fn, 'r')
    fout = open("RS_" + fn, 'w')

    # Skip the input header row (column names).
    fin.readline()

    if header:
        ocolnames = ["invited", "userCF_reco", "evtCF_reco", "svdCF_reco", "user_reco", "evt_p_reco",
                     "evt_c_reco", "user_pop", "frnd_infl", "evt_pop"]
        if train:
            ocolnames.append("interested")
            ocolnames.append("not_interested")
        fout.write(",".join(ocolnames) + "\n")

    ln = 0
    for line in fin:
        ln += 1

        cols = line.strip().split(",")
        userId = cols[0]
        eventId = cols[1]
        invited = cols[2]

        # Progress log. BUGFIX: parse the line first so the printed ids
        # belong to the current record (the original printed the previous
        # iteration's userId/eventId).
        if ln % 500 == 0:
            print("%s:%d (userId, eventId)=(%s, %s)" % (fn, ln, userId, eventId))

        userCF_reco = RS.userCFReco(userId, eventId)
        itemCF_reco = RS.eventCFReco(userId, eventId)
        svdCF_reco = RS.svdCFReco(userId, eventId)

        user_reco = RS.userReco(userId, eventId)
        evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)
        user_pop = RS.userPop(userId)

        frnd_infl = RS.friendInfluence(userId)
        evt_pop = RS.eventPop(eventId)
        ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco, user_reco, evt_p_reco,
                 evt_c_reco, user_pop, frnd_infl, evt_pop]

        if train:
            ocols.append(cols[4])  # interested label
            ocols.append(cols[5])  # not_interested label
        fout.write(",".join(str(x) for x in ocols) + "\n")

    fin.close()
    fout.close()


if __name__ == "__main__":
    # Guarded entry point: build the recommender (loads all pre-computed
    # feature files and trains the SVD model), then emit the feature CSVs
    # for the downstream classifier. The guard keeps the heavy pipeline
    # from running when this module is merely imported.
    RS = RecommonderSystem()
    print("生成训练数据...\n")
    generateRSData(RS, train=True, header=True)

    print("生成预测数据...\n")
    generateRSData(RS, train=False, header=True)

# 时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。
