# -*- coding: utf-8 -*-

#ref: http://blog.csdn.net/lsldd/article/details/41598751

from matplotlib import pyplot
import scipy as sp
import numpy as np
from matplotlib import pylab
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.metrics import classification_report

import time
from scipy import sparse




import numpy as np
import pandas as pd

# MovieLens 100k ratings: tab-separated user_id/item_id/rating/timestamp rows.
header = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('Data/dataset/ml-100k/u.data', sep='\t', names=header)

# Count distinct users and items to size the rating matrices built below.
n_users = len(df['user_id'].unique())
n_items = len(df['item_id'].unique())
print(f'Number of users = {n_users} | Number of movies = {n_items}')


# FIX: sklearn.cross_validation was removed in scikit-learn 0.20; the same
# train_test_split now lives in sklearn.model_selection.
# NOTE(review): the top-of-file import on L10 has the same problem and must
# be updated too for this file to load under a modern scikit-learn.
from sklearn.model_selection import train_test_split as _train_test_split
train_data, test_data = _train_test_split(df, test_size=0.25)

# Create two user-item matrices, one for training and another for testing.
# Rows are 0-based user ids, columns 0-based item ids, values are ratings.
train_data_matrix = np.zeros((n_users, n_items))
for row in train_data.itertuples():
    # itertuples yields (Index, user_id, item_id, rating, timestamp);
    # named attributes are clearer than the positional line[1..3] access.
    train_data_matrix[row.user_id - 1, row.item_id - 1] = row.rating

test_data_matrix = np.zeros((n_users, n_items))
for row in test_data.itertuples():
    test_data_matrix[row.user_id - 1, row.item_id - 1] = row.rating


from sklearn.metrics.pairwise import pairwise_distances
# NOTE(review): with metric='cosine', pairwise_distances returns cosine
# DISTANCE (1 - cosine similarity), yet the result is passed to predict()
# below as a similarity weight -- a well-known quirk of this tutorial.
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')
from sklearn.metrics.pairwise import cosine_similarity
# True cosine similarities computed for comparison; these two variables
# are never used again in this file.
user_similarity1 = cosine_similarity(train_data_matrix, dense_output=True)
item_similarity1 = cosine_similarity(train_data_matrix.T, dense_output=True)

def predict(ratings, similarity, type='user'):
    """Neighborhood-based CF prediction.

    type='user': predict from each user's mean plus the similarity-weighted
    deviations of all users.  type='item': similarity-weighted average of the
    user's own ratings.  Any other value raises (pred is never bound).
    """
    if type == 'user':
        # Per-user mean kept as an (n_users, 1) column so it broadcasts
        # across the item axis.
        user_bias = ratings.mean(axis=1)[:, np.newaxis]
        centered = ratings - user_bias
        norm = np.abs(similarity).sum(axis=1)[:, np.newaxis]
        pred = user_bias + similarity.dot(centered) / norm
    elif type == 'item':
        norm = np.array([np.abs(similarity).sum(axis=1)])
        pred = ratings.dot(similarity) / norm
    return pred


# Generate full prediction matrices from the training data with both CF flavors.
item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')


from sklearn.metrics import mean_squared_error
from math import sqrt
def rmse(prediction, ground_truth):
    """Root-mean-squared error restricted to rated entries.

    Only positions where ground_truth is non-zero (i.e. actually rated)
    are compared; unrated cells of the test matrix are ignored.
    """
    rated = ground_truth.nonzero()
    prediction = prediction[rated].flatten()
    ground_truth = ground_truth[rated].flatten()
    # Compute the MSE directly with numpy instead of depending on
    # sklearn.metrics for a one-line calculation (identical value).
    return sqrt(np.mean(np.square(prediction - ground_truth)))


# Evaluate against the held-out ratings only (rmse masks zero/unrated cells).
print('User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix)))
print('Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix)))









start_time = time.time()  # wall-clock start for the timing report at the end


def show_data(x_p, y_p):
    """Render the user-item rating matrix as an image.

    x_p: (n, 2) array of 0-based (user, item) index pairs.
    y_p: (n,) array of the corresponding ratings.
    """
    # FIX: the `np.float` alias was removed in NumPy 1.24; the builtin
    # float (== float64) is the drop-in replacement.
    y = sparse.csc_matrix((y_p, x_p.T), dtype=float).todense()

    # Visualize the matrix: one pixel per (user, item) rating.
    pyplot.imshow(y, interpolation='nearest')
    pyplot.xlabel('Item')
    pyplot.ylabel('User')
    pyplot.show()

# Pearson correlation of `testfor` against every row vector of `data`
# (one row of `data` = one vector).
def calc_relation(testfor, data):
    """Return the Pearson r between *testfor* and each row of *data*.

    Pearson reference:
    http://jingyan.baidu.com/article/a378c9609b5849b328283093.html
    """
    return np.array([np.corrcoef(testfor, row)[0, 1] for row in data])


# Vectorized correlation helper contributed by luispedro.
def all_correlations(y, x):
    """Pearson correlation of vector *y* with every row of matrix *x*.

    Loop-free equivalent of computing a per-row correlation; the small
    epsilons keep zero-variance rows (or a constant *y*) from dividing
    by zero, at the cost of a tiny bias in the result.
    """
    x = np.asanyarray(x, float)
    y = np.asanyarray(y, float)
    n = float(len(y))
    dots = np.dot(x, y)
    row_mean = x.mean(1)
    row_std = x.std(1) + 1e-5  # epsilon: rows with zero spread
    y_std = y.std() + 1e-5     # epsilon: constant y
    return (dots - row_mean * y.mean() * n) / n / row_std / y_std


# Load the raw data: each u.data row is "user_id  item_id  rating  timestamp".
data = np.loadtxt('Data/dataset/ml-100k/u.data')
x_p = data[:, :2]  # first two columns: (user_id, item_id)
y_p = data[:, 2]  # third column: the rating
x_p -= 1  # shift the 1-based ids to 0-based matrix indices


# Visualize the full rating matrix before splitting.
show_data(x_p,y_p)

# Split the ratings into train and test sets (75/25 -- the original comment
# said 80/20, but test_size=0.25 is what the code does).
x_p_train, x_p_test, y_p_train, y_p_test = train_test_split(x_p, y_p, test_size=0.25)

# FIX 1: `np.float` was removed in NumPy 1.24 -> use the builtin float.
# FIX 2: pass an explicit shape derived from the FULL id range; otherwise
# csc_matrix infers each matrix's shape from the max index present in its
# own split, so train and test matrices could end up with different shapes
# and the rmse comparison below could raise an IndexError.
full_shape = (int(x_p[:, 0].max()) + 1, int(x_p[:, 1].max()) + 1)
train_data_matrix = sparse.csc_matrix((y_p_train, x_p_train.T), shape=full_shape, dtype=float).todense()
test_data_matrix = sparse.csc_matrix((y_p_test, x_p_test.T), shape=full_shape, dtype=float).todense()

nUser, nItem = train_data_matrix.shape


#from sklearn.metrics.pairwise import pairwise_distances
#user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
#item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')
from sklearn.metrics.pairwise import cosine_similarity
# Unlike the first section (which fed cosine *distances* into predict),
# this section weights predictions with true cosine similarities.
user_similarity = cosine_similarity(train_data_matrix, dense_output=True)
item_similarity = cosine_similarity(train_data_matrix.T, dense_output=True)

# Training (alternative: per-item Pearson correlations via calc_relation,
# superseded by the cosine_similarity call above)
#item_similarity = np.zeros((nItem, nItem))
#for i in range(nItem):
#    item_similarity[i] = calc_relation(M[:, i].T, M.T)
#    item_similarity[i, i] = -1


def predict(ratings, similarity, type='user'):
    """Neighborhood-based CF prediction (second section's variant).

    type='user': each user's mean plus similarity-weighted deviations.
    type='item': similarity-weighted average of the user's own ratings.
    Any other value raises (pred is never bound).
    """
    if type == 'user':
        # FIX: keep the per-user mean as an explicit (n_users, 1) column.
        # The original `ratings - ratings.mean(axis=1)` only worked because
        # np.matrix (from .todense()) keeps axis-1 means 2-D; with a plain
        # ndarray it broadcast along the wrong axis.  This form is correct
        # for both input types and leaves np.matrix results unchanged.
        mean_user_rating = np.asarray(ratings.mean(axis=1)).reshape(-1, 1)
        ratings_diff = ratings - mean_user_rating
        pred = mean_user_rating + similarity.dot(ratings_diff) / np.array(
            [np.abs(similarity).sum(axis=1)]).T
    elif type == 'item':
        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
    return pred


# Generate full prediction matrices from the training data with both CF flavors.
item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')


from sklearn.metrics import mean_squared_error
from math import sqrt
def rmse(prediction, ground_truth):
    """Root-mean-squared error restricted to rated entries.

    Duplicate of the earlier rmse helper: only positions where
    ground_truth is non-zero (actually rated) are compared.
    """
    rated = ground_truth.nonzero()
    prediction = prediction[rated].flatten()
    ground_truth = ground_truth[rated].flatten()
    # Compute the MSE directly with numpy instead of depending on
    # sklearn.metrics for a one-line calculation (identical value).
    return sqrt(np.mean(np.square(prediction - ground_truth)))


# Evaluate against the held-out ratings only (rmse masks zero/unrated cells).
print('User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix)))
print('Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix)))



# Sketch of using item_similarity for top-3 "also buy" recommendations:
#for t in range(item_similarity.shape[1]):
#    item = item_similarity[t].argsort()[-3:]
#    print("Buy Item %d will buy item %d,%d,%d " % (t, item[0], item[1], item[2]))




# Report total wall-clock time for the sparse-matrix section.
print("time spent:", time.time() - start_time)