# Standard library
import datetime
import json
import os
import pickle  # persist data/model to file
from collections import defaultdict

# Third-party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Sparse matrices, used to store the rating table
import scipy.io as sio
import scipy.sparse as ss
from numpy.random import random
from sklearn.model_selection import train_test_split
# Location of the raw data files.
dpath = './data/'
df_triplet = pd.read_csv(dpath + 'triplet_dataset_sub.csv')

# Total play count per user.
triplet_dataset_sub_song_sum_df = (
    df_triplet[['user', 'play_count']].groupby('user').sum().reset_index()
)
triplet_dataset_sub_song_sum_df.rename(
    columns={'play_count': 'total_play_count'}, inplace=True
)
triplet_dataset_sub_song_merged = pd.merge(df_triplet, triplet_dataset_sub_song_sum_df)

# Implicit rating: the fraction of a user's total plays spent on each song.
triplet_dataset_sub_song_merged['fractional_play_count'] = (
    triplet_dataset_sub_song_merged['play_count']
    / triplet_dataset_sub_song_merged['total_play_count']
)


# Hold out 20% of the rating records for evaluation.
X_train, X_test = train_test_split(
    triplet_dataset_sub_song_merged, random_state=33, test_size=0.2
)

# Distinct users and songs in the training split.
unique_users = X_train.user.unique()
unique_items = X_train.song.unique()

n_users = unique_users.shape[0]
n_items = unique_items.shape[0]

# Map raw user / song ids to dense integer indices.
users_index = {u: j for j, u in enumerate(unique_users)}
items_index = {i: j for j, i in enumerate(unique_items)}

# Inverted lists:
#   user_items[u] -> set of item indices user u has rated
#   item_users[i] -> set of user indices that have rated item i
user_items = defaultdict(set)
item_users = defaultdict(set)

# User-item rating matrix R (sparse): each user's score for each song.
user_item_scores = ss.dok_matrix((n_users, n_items))

# Single pass over the training records. itertuples avoids the cost of
# three separate .iloc row lookups per record, which dominated the loop.
for row in X_train.itertuples(index=False):
    cur_user_index = users_index[row.user]
    cur_item_index = items_index[row.song]

    # Inverted lists: this user rated this song / this song was rated by this user.
    user_items[cur_user_index].add(cur_item_index)
    item_users[cur_item_index].add(cur_user_index)

    user_item_scores[cur_user_index, cur_item_index] = row.fractional_play_count

# Dimensionality of the latent factor space.
K = 40

# Per-item and per-user bias terms.
bi = np.zeros((n_items, 1))
bu = np.zeros((n_users, 1))

# Latent factor matrices, initialised with small uniform noise scaled by
# sqrt(K)/10. random((n, K)) draws from the RNG stream in the same
# row-major order as a per-row loop of random((K, 1)) calls, so the
# initial values match the loop-based formulation exactly.
pu = random((n_users, K)) / 10 * np.sqrt(K)
qi = random((n_items, K)) / 10 * np.sqrt(K)

# Global mean rating over the training set.
mu = X_train['fractional_play_count'].mean()
def svd_pred(uid, iid):
    """Predict user `uid`'s score for item `iid`.

    Baseline (global mean + item bias + user bias) plus the dot product of
    the item and user latent vectors. Note the result is a 1-element
    ndarray, because the bias rows bi[iid] / bu[uid] have shape (1,).
    """
    baseline = mu + bi[iid] + bu[uid]
    interaction = np.sum(qi[iid] * pu[uid])
    return baseline + interaction


# SGD hyper-parameters:
#   gamma  - learning rate (decayed after each epoch)
#   Lambda - L2 regularisation strength
#   steps  - number of epochs over the training data
steps = 50
gamma = 0.04
Lambda = 0.15

# Total number of training records.
n_records = X_train.shape[0]

# Hoist the per-record id/rating lookups out of the epoch loop: resolving
# them through .iloc costs three pandas row lookups per sample per epoch
# and dominated the runtime.
train_uids = X_train['user'].map(users_index).to_numpy()
train_iids = X_train['song'].map(items_index).to_numpy()
train_ratings = X_train['fractional_play_count'].to_numpy()

for step in range(steps):
    print('The ' + str(step) + '-th  step is running')
    rmse_sum = 0.0

    # Visit the training samples in a fresh random order each epoch.
    # permutation returns a shuffled copy; it does not modify its input.
    kk = np.random.permutation(n_records)
    for j in range(n_records):
        # One training sample per iteration.
        line = kk[j]

        uid = train_uids[line]
        iid = train_iids[line]
        rating = train_ratings[line]

        # Prediction residual.
        eui = rating - svd_pred(uid, iid)
        # Accumulate squared error for the epoch RMSE report.
        rmse_sum += eui ** 2

        # Stochastic gradient descent updates with L2 regularisation.
        bu[uid] += gamma * (eui - Lambda * bu[uid])
        bi[iid] += gamma * (eui - Lambda * bi[iid])

        # BUG FIX: qi[iid] is a *view* into qi, so the old `temp = qi[iid]`
        # still aliased the row after the in-place update below, and pu's
        # gradient silently used the already-updated qi. Copy to snapshot
        # the pre-update value, as the SGD update rule requires.
        temp = qi[iid].copy()
        qi[iid] += gamma * (eui * pu[uid] - Lambda * qi[iid])
        pu[uid] += gamma * (eui * temp - Lambda * pu[uid])

    # Decay the learning rate after each epoch.
    gamma = gamma * 0.93
    print("the rmse of this step on train data is ", np.sqrt(rmse_sum / n_records))


def save_json(filepath):
    """Serialize the trained SVD model parameters (module globals) to JSON.

    Writes mu, K, the bias vectors bi/bu and the latent matrices qi/pu
    (converted to nested lists) to *filepath*.
    """
    model = {
        'mu': mu,
        'K': K,
        'bi': bi.tolist(),
        'bu': bu.tolist(),
        'qi': qi.tolist(),
        'pu': pu.tolist(),
    }

    # Stream the JSON straight into the file.
    with open(filepath, 'w') as file:
        json.dump(model, file)


def load_json(filepath):
    """Load SVD model parameters from *filepath* into the module globals.

    BUG FIX: the previous version assigned mu/K/bi/bu/qi/pu to *locals*
    only, so the loaded model was silently discarded. The `global`
    declaration makes the load take effect. Also returns the raw parameter
    dict (backward-compatible: the old version returned None and no caller
    used the return value).
    """
    global mu, K, bi, bu, qi, pu

    with open(filepath, 'r') as file:
        dict_ = json.load(file)

    mu = dict_['mu']
    K = dict_['K']

    bi = np.asarray(dict_['bi'])
    bu = np.asarray(dict_['bu'])

    qi = np.asarray(dict_['qi'])
    pu = np.asarray(dict_['pu'])

    return dict_

# Persist the trained model, then reload it (round-trip smoke test).
# Create the output directory first: open() does not create directories,
# so the save crashed when ./model/ did not already exist.
os.makedirs('./model', exist_ok=True)
save_json('./model/svd_model.json')
load_json('./model/svd_model.json')


def svd_CF_recommend(user):
    """Rank every item the given user has NOT rated in the training set.

    Returns a DataFrame with columns ['item_id', 'score'], sorted by
    predicted score descending. Items the user already rated in training
    are excluded.
    """
    cur_user_id = users_index[user]

    # Items this user already rated in the training set.
    cur_user_items = user_items[cur_user_id]

    # Predicted score for every item. Rated items keep the 0.0 placeholder
    # and are filtered out below.
    user_items_scores = np.zeros(n_items)
    for i in range(n_items):
        if i not in cur_user_items:
            user_items_scores[i] = svd_pred(cur_user_id, i)

    # Sort the item indices by their predicted score, best first, keeping
    # the (score, index) pairing.
    sort_index = sorted(
        ((e, i) for i, e in enumerate(list(user_items_scores))), reverse=True
    )

    # Reverse lookup table (dense index -> raw song id), built once. The
    # previous version ran an O(n_items) list scan per recommended item.
    index_items = {v: k for k, v in items_index.items()}

    # Collect rows first and build the frame once: appending with
    # df.loc[len(df)] inside the loop is quadratic.
    columns = ['item_id', 'score']
    rows = []
    for score, cur_item_index in sort_index:
        if not np.isnan(score) and cur_item_index not in cur_user_items:
            rows.append([index_items[cur_item_index], score])

    return pd.DataFrame(rows, columns=columns)


# 统计总的用户
unique_users_test = X_test['user'].unique()

# 为每个用户推荐的item的数目
n_rec_items = 10

# 性能评价参数初始化，用户计算Percison和Recall
n_hits = 0
n_total_rec_items = 0
n_test_items = 0

# 所有被推荐商品的集合（对不同用户），用于计算覆盖度
all_rec_items = set()

# 残差平方和，用与计算RMSE
rss_test = 0.0

# 对每个测试用户
for user in unique_users_test:
    # 测试集中该用户打过分的电影（用于计算评价指标的真实值）
    if user not in users_index:  # user在训练集中没有出现过，新用户不能用协同过滤
        print(str(user) + ' is a new user.\n')
        continue

    user_records_test = X_test[X_test.user == user]

    # 对每个测试用户，计算该用户对训练集中未出现过的商品的打分，并基于该打分进行推荐（top n_rec_items）
    # 返回结果为DataFrame
    rec_items = svd_CF_recommend(user)
    for i in range(n_rec_items):
        item = rec_items.iloc[i]['item_id']

        if item in user_records_test['song'].values:
            n_hits += 1
        all_rec_items.add(item)

    # 计算rmse
    for i in range(user_records_test.shape[0]):
        item = user_records_test.iloc[i]['song']
        score = user_records_test.iloc[i]['fractional_play_count']

        df1 = rec_items[rec_items.item_id == item]
        if (df1.shape[0] == 0):  # item不在推荐列表中，可能是新item在训练集中没有出现过，或者该用户已经打过分新item不能被协同过滤推荐
            print(str(item) + ' is a new item or  user ' + str(user) + ' already rated it.\n')
            continue
        pred_score = df1['score'].values[0]
        rss_test += (pred_score - score) ** 2  # 残差平方和

    # 推荐的item总数
    n_total_rec_items += n_rec_items

    # 真实item的总数
    n_test_items += user_records_test.shape[0]

#Precision & Recall
precision = n_hits / (1.0*n_total_rec_items)
recall = n_hits / (1.0*n_test_items)
#覆盖度：推荐商品占总需要推荐商品的比例
coverage = len(all_rec_items) / (1.0* n_items)
#打分的均方误差
rmse=np.sqrt(rss_test / X_test.shape[0])
print("The precision is", precision)
print("The recall is", recall)
print("The coverage is", coverage)
print("The rmse is", rmse)