# coding: utf-8
import pandas as pd
import numpy as np

#Load data (user and item indexes, plus the inverted indexes)
import pickle as pickle

#Sparse matrix: the user-item rating table
import scipy.io as sio
import sys
import os

#Distance metrics
import scipy.spatial.distance as ssd

# Index maps: raw user/item keys -> dense integer ids
users_index = pickle.load(open('users_index.pkl', 'rb'))
items_index = pickle.load(open("items_index.pkl", 'rb'))

n_users = len(users_index)
n_items = len(items_index)

# Inverted indexes
## user id -> items this user has rated
user_items = pickle.load(open("user_items.pkl", 'rb'))
## item id -> users who have rated this item
item_users = pickle.load(open("item_users.pkl", 'rb'))

# User-item rating matrix R (sparse, Matrix Market format)
user_item_scores = sio.mmread("user_item_scores")  # .todense()
# CSR gives fast random element access for the loops below
user_item_scores = user_item_scores.tocsr()

# Per-user mean rating: used to mean-center scores in the similarity
# computation and as the fallback prediction when no neighbor exists.
users_mu = np.zeros(n_users)
for u in range(n_users):
    rated = user_items[u]  # items this user has rated
    # Guard: a user with no rated items would divide by zero in the
    # original code; such users keep a mean of 0.0.
    if rated:
        users_mu[u] = sum(user_item_scores[u, i] for i in rated) / len(rated)


def item_similarity(iid1, iid2):
    """Adjusted-cosine similarity between two items.

    Computes the cosine similarity of the two items' mean-centered
    rating vectors, restricted to users who rated BOTH items.

    Returns 0 when the items share no raters, and 0.0 when either
    centered vector has zero norm (cosine is undefined there).
    """
    # Users who rated both items. A set intersection replaces the
    # original manual dict-as-set membership loop.
    common_users = set(item_users[iid1]) & set(item_users[iid2])
    if not common_users:
        # No co-raters: similarity is defined as 0.
        return 0

    # Fix one iteration order so s1[k] and s2[k] refer to the same user.
    users = list(common_users)

    # Each item's ratings from the common users, centered by the
    # rater's mean (users_mu) to remove per-user rating bias.
    s1 = np.array([user_item_scores[u, iid1] - users_mu[u] for u in users])
    s2 = np.array([user_item_scores[u, iid2] - users_mu[u] for u in users])

    similarity = 1 - ssd.cosine(s1, s2)
    if np.isnan(similarity):  # denominator was 0 (all-zero centered vector)
        similarity = 0.0
    return similarity


# Precompute the full symmetric item-item similarity matrix.
items_similarity_matrix = np.matrix(np.zeros(shape=(n_items, n_items)), float)

for row in range(n_items):
    # Every item is perfectly similar to itself.
    items_similarity_matrix[row, row] = 1.0

    # Coarse progress indicator (one line per 100 items)
    if row % 100 == 0:
        print("i=%d " % (row))

    # Only compute the upper triangle; mirror into the lower one.
    for col in range(row + 1, n_items):
        sim = item_similarity(row, col)
        items_similarity_matrix[col, row] = sim
        items_similarity_matrix[row, col] = sim

# Persist so later runs can load the matrix instead of recomputing it.
pickle.dump(items_similarity_matrix, open("items_similarity.pkl", 'wb'))


def items_similarity(n_items):
    """Build, persist, and return the n_items x n_items item similarity matrix.

    The matrix is symmetric with a unit diagonal; off-diagonal entries
    come from item_similarity(). The result is also dumped to
    "items_similarity.pkl" as a side effect.
    """
    sim_matrix = np.matrix(np.zeros(shape=(n_items, n_items)), float)

    for a in range(n_items):
        sim_matrix[a, a] = 1.0  # self-similarity

        # Coarse progress indicator
        if a % 100 == 0:
            print("i=:%d " % (a))

        # Upper triangle only; mirror for symmetry.
        for b in range(a + 1, n_items):
            sim_matrix[b, a] = item_similarity(a, b)
            sim_matrix[a, b] = sim_matrix[b, a]

    pickle.dump(sim_matrix, open("items_similarity.pkl", 'wb'))
    return sim_matrix
#Similarity between all items
# (alternative: load a precomputed matrix instead of recomputing it)
# NOTE(review): the commented-out call below passes n_users where the
# function expects n_items — verify before re-enabling.
# items_similarity_matrix = pickle.load(open("items_similarity.pkl", 'rb'))
# items_similarity_matrix = items_similarity(n_users)
# print(items_similarity_matrix)


### Predict a user's rating for an item from ALL items the user has
### rated, weighted by item-item similarity.
def Item_CF_pred1(uid, iid):
    weighted_sum = 0.0
    weight_total = 0.0

    for rated_item in user_items[uid]:  # items this user has rated
        # Precomputed similarity between the rated item and iid
        # (sim = item_similarity(rated_item, iid))
        sim = items_similarity_matrix[rated_item, iid]
        if sim == 0:
            continue  # unrelated item contributes nothing

        weighted_sum += sim * (user_item_scores[uid, rated_item])
        # Similarities can be negative, so normalize by absolute weight
        # to keep the prediction in a sensible range.
        weight_total += np.abs(sim)

    if weight_total != 0:
        score = weighted_sum / weight_total
    else:
        # No similar rated item: fall back to the user's mean rating.
        score = users_mu[uid]

    # Negative predictions are clipped to zero.
    if score < 0:
        score = 0.0

    return score


### Predict a user's rating for an item, using only the n_Knns most
### similar items among those the user has rated.
def Item_CF_pred2(uid, iid, n_Knns):
    weighted_sum = 0.0
    weight_total = 0.0
    neighbors_used = 0

    # Rank every item by its similarity to iid, most similar first.
    sims = np.array(items_similarity_matrix[iid, :]).flatten()
    ranked = sorted(((s, idx) for idx, s in enumerate(list(sims))), reverse=True)

    for _, candidate in ranked:
        if neighbors_used >= n_Knns:
            break  # enough nearest neighbors collected

        if candidate not in user_items[uid]:
            continue  # only items the user actually rated can contribute

        # Precomputed similarity (sim = item_similarity(candidate, iid))
        sim = items_similarity_matrix[iid, candidate]
        if sim != 0:
            weighted_sum += sim * (user_item_scores[uid, candidate])
            # abs: similarities can be negative
            weight_total += np.abs(sim)

        neighbors_used += 1

    if weight_total != 0:
        score = weighted_sum / weight_total
    else:
        # No usable neighbors: fall back to the user's mean rating.
        score = users_mu[uid]

    # Negative predictions are clipped to zero.
    if score < 0:
        score = 0.0

    return score


### Predict a user's rating for an item, using the n_Knns items most
### similar to it among ALL items (whether the user rated them or not).
def Item_CF_pred3(uid, iid, n_Knns):
    weighted_sum = 0.0
    weight_total = 0.0

    # Top-n_Knns items by similarity to iid, most similar first.
    sims = np.array(items_similarity_matrix[iid, :]).flatten()
    top = sorted(((s, idx) for idx, s in enumerate(list(sims))), reverse=True)[:n_Knns]

    for _, candidate in top:
        if candidate in user_items[uid]:  # usable only if the user rated it
            # Precomputed similarity (sim = item_similarity(candidate, iid))
            sim = items_similarity_matrix[iid, candidate]
            if sim != 0:
                weighted_sum += sim * (user_item_scores[uid, candidate])
                # abs: similarities can be negative
                weight_total += np.abs(sim)

    if weight_total != 0:
        score = weighted_sum / weight_total
    else:
        # None of the top neighbors was rated by the user: fall back
        # to the user's mean rating.
        score = users_mu[uid]

    # Negative predictions are clipped to zero.
    if score < 0:
        score = 0.0

    return score


# user: external user key (as it appears in users_index)
# Returns recommended items and their predicted scores as a DataFrame
# with columns ['item_id', 'score'], sorted by score descending.
def recommend(user):
    cur_user_id = users_index[user]

    # Items this user already rated in the training set.
    cur_user_items = user_items[cur_user_id]

    # Predicted score for every item the user has NOT rated yet;
    # already-rated items keep a score of 0 and are filtered out below.
    user_items_scores = np.zeros(n_items)
    for i in range(n_items):  # all items
        if i not in cur_user_items:
            user_items_scores[i] = Item_CF_pred2(cur_user_id, i, 10)

    # Rank (score, item_index) pairs, best first.
    sort_index = sorted(((e, i) for i, e in enumerate(list(user_items_scores))), reverse=True)

    # Invert items_index ONCE (dense index -> raw item key). The original
    # rebuilt two lists and did a linear search inside the loop, which
    # made this O(n^2) overall.
    index_to_item = {v: k for k, v in items_index.items()}

    # Collect rows first and build the DataFrame in one shot; appending
    # row-by-row with df.loc[len(df)] is quadratic.
    rows = []
    for score, item_idx in sort_index:
        # `not np.isnan(...)` replaces the original bitwise `~np.isnan(...)`,
        # which only happens to work because np.isnan returns np.bool_.
        if not np.isnan(score) and item_idx not in cur_user_items:
            rows.append([index_to_item[item_idx], score])

    columns = ['item_id', 'score']
    return pd.DataFrame(rows, columns=columns)

#Load the test data
triplet_cols = ['user_id','item_id', 'rating', 'timestamp']

dpath = './data/'
df_triplet_test = pd.read_csv(dpath +'u1.test', sep='\t', names=triplet_cols, encoding='latin-1')
#df_triplet_test.head()

# Unique users present in the test set
unique_users_test = df_triplet_test['user_id'].unique()

# Number of items to recommend per user
n_rec_items = 20

# Metric accumulators, used to compute Precision and Recall
n_hits = 0
n_total_rec_items = 0
n_test_items = 0

# Union of all recommended items (across users), used for coverage
all_rec_items = set()

# Residual sum of squares, used to compute RMSE
rss_test = 0.0

# For each test user
for user in unique_users_test:
    # Items this user rated in the test set (ground truth for the metrics)
    if user not in users_index:  # user unseen in training; CF cannot handle new users
        print(str(user) + ' is a new user.\n')
        continue

    user_records_test = df_triplet_test[df_triplet_test.user_id == user]

    # Predict scores for the user's unseen items and recommend the top
    # n_rec_items of them; the result is a DataFrame.
    # NOTE(review): leftover debug print below
    print('lixiao')
    print(user)
    rec_items = recommend(user)

    # Count hits among the top n_rec_items recommendations.
    for i in range(n_rec_items):
        item = rec_items.iloc[i]['item_id']

        if item in user_records_test['item_id'].values:
            n_hits += 1
        all_rec_items.add(item)

    # Accumulate squared residuals for RMSE
    for i in range(user_records_test.shape[0]):
        item = user_records_test.iloc[i]['item_id']
        score = user_records_test.iloc[i]['rating']

        df1 = rec_items[rec_items.item_id == item]
        if (df1.shape[0] == 0):  # item unseen in training; CF cannot recommend new items
            print(str(item) + ' is a new item.\n')
            continue
        pred_score = df1['score'].values[0]
        rss_test += (pred_score - score) ** 2  # residual sum of squares

    # Total number of recommended items so far
    n_total_rec_items += n_rec_items

    # Total number of ground-truth test items so far
    n_test_items += user_records_test.shape[0]

# Precision & Recall
precision = n_hits / (1.0 * n_total_rec_items)
recall = n_hits / (1.0 * n_test_items)

# Coverage: fraction of the item catalogue that was ever recommended
coverage = len(all_rec_items) / (1.0 * n_items)

# Root mean squared error of the predicted ratings
rmse = np.sqrt(rss_test / df_triplet_test.shape[0])
print("The precision is", precision)
print("The recall is", recall)
print("The coverage is", coverage)
print("The rmse is", rmse)
