# -*- coding: utf-8 -*-
"""
Created on Tue Feb  9 20:51:32 2021

@author: lenovo
"""

import pandas as pd
import xlwt #写入excel需要导入的包
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans 
from matplotlib import pyplot as plt


#读取数据
def readData(file1, file2):
    """Load the MovieLens data files.

    Parameters
    ----------
    file1 : str - path to movies.csv (movieId, title, genres columns).
    file2 : str - path to ratings.csv (userId, movieId, rating, timestamp).

    Returns
    -------
    (movies, ratings) : tuple of two pandas DataFrames.
    """
    return pd.read_csv(file1), pd.read_csv(file2)

#划分训练集和测试集
def divideData(ratings):
    """Randomly split the ratings into an 80% train / 20% test set.

    The test set is built with an anti-join trick: append the train rows to
    the full set, then drop every row that now appears twice, leaving only
    the rows that were NOT sampled into the train set.

    Parameters
    ----------
    ratings : DataFrame with userId, movieId, rating, timestamp columns.

    Returns
    -------
    (train_ratings, test_ratings) : tuple of DataFrames.
    NOTE: random_state=None makes the split non-deterministic between runs.
    """
    rate = 0.8
    train_ratings = ratings.sample(frac=rate, replace=False, random_state=None)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0 — pd.concat is the supported equivalent.
    test_ratings = pd.concat([ratings, train_ratings])
    test_ratings = test_ratings.drop_duplicates(
        subset=['userId', 'movieId', 'rating', 'timestamp'], keep=False)
    return train_ratings, test_ratings

#创建用户-电影-类别数据
def userMovieGenreData(movies,train_ratings):
    user_movie_genre_DF = train_ratings[['userId','movieId']]
    user_movie_genre_DF.insert(2,'genres',0)
    #将movies中的movieId列和genres列转变为字典，每部电影都有特定的类别
    movie_genre_dict = {}
    for index, row in movies.iterrows():
        movie_genre_dict[row['movieId']] = row['genres']
    #根据用户所观看的movieId将其电影的类别标注上去
    for index,row in user_movie_genre_DF.iterrows():
        user_movie_genre_DF.loc[index,'genres'] = movie_genre_dict[row['movieId']]
    user_movie_genre_list = []#创建一个空的list
    for index,row in user_movie_genre_DF.iterrows():
        for genre in row['genres'].split('|'):
            user_movie_genre_list.append([row['userId'],row['movieId'],genre])
    #将列表转化成Dataframe
    user_movie_genre_DF = pd.DataFrame(user_movie_genre_list)
    user_movie_genre_DF.columns = ['userId','movieId','genre']#修改列名
    #将user_movie_genre_DF写入excel
    user_movie_genre_DF.to_excel('user_movie_genre.xlsx',sheet_name='user_movie_genre') 
    return user_movie_genre_DF

#计算用户兴趣度矩阵
def userInterestMax(train_ratings,user_movie_genre_DF):
    #创建用户-类别数据框，用于计算用户兴趣度
    userId = train_ratings['userId'].value_counts().sort_index().index #所有用户ID，从小到大排序
    rows = len(userId)
    cols = 20
    user_genre_DF = pd.DataFrame(np.zeros((rows,cols)))#创建一个610*20的全0数据框
    user_genre_DF.index = userId 
    user_genre_DF.rename(columns={0:'Action',1:'Adventure',2:'Animation',3:'Children',
                                        4:'Comedy',5:'Crime',6:'Documentary',7:'Drama',
                                        8:'Fantasy',9:'Film-Noir',10:'Horror',11:'Musical',
                                        12:'Mystery',13:'Romance',14:'Sci-Fi',15:'Thriller',
                                       16:'War',17:'Western',18:'(no genres listed)',19:'IMAX'},inplace=True)
    for uid in userId:
        ui = user_movie_genre_DF[user_movie_genre_DF['userId']==uid]
        ui_dict = dict(ui['genre'].value_counts())
        for genre in ui_dict.keys():
            user_genre_DF.loc[uid,genre] = ui_dict[genre]
    #把user_genre_DF写入excel中
    user_genre_DF.to_excel('user_genre.xlsx',sheet_name='user_genre') 
    
    #计算用户兴趣度矩阵
    InterestMax = user_genre_DF
    for index,row in InterestMax.iterrows():
        InterestMax.loc[index] = InterestMax.loc[index]/InterestMax.loc[index].sum()
    #把InterestMax写入excel内
    InterestMax.to_excel('InterestMax.xlsx',sheet_name='InterestMax')
    return userId,InterestMax

#根据用户兴趣度矩阵InterestMax进行K-means聚类
def userCommunity(userId,InterestMax,k,Xn):
    estimator = KMeans(n_clusters=k) #构造一个聚类数为k的聚类器 
    estimator.fit(Xn) #聚类 
    label_pred = estimator.labels_   #获取聚类标签 
    #用户社区划分——根据兴趣度聚类
    userCommunity = pd.DataFrame(label_pred)
    userCommunity.index = userId
    userCommunity.rename(columns={0:'clusterId'},inplace=True)
    #把用户社区写入excel内
    userCommunity.to_excel('userCommunity.xlsx',sheet_name='userCommunity') 
    return userCommunity
   
#创建函数：计算余弦相似度
def cosine_similarity(x, y):
    """Cosine similarity of two 1-D numpy vectors.

    NOTE(review): this local definition shadows the
    sklearn.metrics.pairwise.cosine_similarity imported at the top of the
    file — the sklearn version is never actually used.

    Parameters
    ----------
    x, y : 1-D numpy arrays of equal length.

    Returns
    -------
    float in [-1, 1]; 0.0 when either vector has zero norm (the original
    produced nan via 0/0 there).
    """
    denom = np.linalg.norm(x) * np.linalg.norm(y)
    # ROBUSTNESS FIX: a user with an all-zero vector previously yielded nan.
    if denom == 0:
        return 0.0
    return x.dot(y.T) / denom

#计算用户与用户之间兴趣度的相似度矩阵，相似度算法：余弦相似度
def similarityInterest(userId,InterestMax):
    rows = len(userId)
    Similarity_Interest_Max = pd.DataFrame(np.zeros((rows,rows)))
    Similarity_Interest_Max.index = userId
    Similarity_Interest_Max.columns = userId
    for ui in userId:
        for uj in userId:
            Similarity_Interest_Max.loc[ui,uj] = cosine_similarity(
                    InterestMax.loc[ui].values,InterestMax.loc[uj].values)
    #把用户兴趣度相似度矩阵写入excel内
    Similarity_Interest_Max.to_excel('Similarity_Interest.xlsx',sheet_name='Similarity_Interest') 
    return Similarity_Interest_Max

#创建一个用户-电影-评分矩阵，横：用户；列：电影；值：评分
def userMovieRating(movies,train_ratings):
    userId = train_ratings['userId'].value_counts().sort_index().index
    movieId = movies['movieId'].value_counts().sort_index().index
    rows = len(userId)
    cols = len(movieId)
    user_movie_rating = pd.DataFrame(np.zeros((rows,cols)))#创建一个610*9742的全0数据框
    user_movie_rating.columns = movieId #将列名改为movieId
    user_movie_rating.index = userId
    for uid in userId:
        ui = train_ratings[train_ratings['userId'] == uid]
        for index,row in ui.iterrows():
            user_movie_rating.loc[uid,row['movieId']] = row['rating']
    #写入excel
    user_movie_rating.to_excel('user_movie_rating.xlsx',sheet_name='user_movie_rating') 
    return user_movie_rating

#创建函数：计算修正余弦相似度
def adjust_cosine_similarity(x,y):
    x_adjust = x - np.mean(x)
    y_adjust = y - np.mean(y)
    num = x_adjust.dot(y_adjust.T)
    denom = np.linalg.norm(x_adjust) * np.linalg.norm(y_adjust)
    return num / denom

#根据用户评分矩阵计算用户之间的相似度，相似度算法：修正余弦相似度
def similarityRating(userId,user_movie_rating):
    rows = len(userId)
    #计算用户与用户之间的相似度矩阵
    Similarity_rating_Max = pd.DataFrame(np.zeros((rows,rows)))
    Similarity_rating_Max.index = userId
    Similarity_rating_Max.columns = userId
    for ui in userId:
        for uj in userId:
            Similarity_rating_Max.loc[ui,uj] = adjust_cosine_similarity(
                    user_movie_rating.loc[ui].values,user_movie_rating.loc[uj].values)
    #把用户相似度矩阵写入excel内
    Similarity_rating_Max.to_excel('Similarity_rating.xlsx',sheet_name='Similarity_rating') 
    return Similarity_rating_Max

#确定邻近用户集
def NearestUserSet(c,test_id,userCommunity,Similarity_Mix_Max):
    test_id_community = userCommunity[userCommunity['clusterId'] == userCommunity.loc[test_id,'clusterId']]
    test_id_community = test_id_community.index
    Similarity_users = pd.DataFrame(np.zeros((len(test_id_community),1)))
    Similarity_users.index = test_id_community
    for comid in test_id_community:
        Similarity_users.loc[comid,0] = Similarity_Mix_Max.loc[test_id,comid]
    NearestUser = Similarity_users.sort_values(by=0,ascending=False).index[0:c]
    return NearestUser

#计算特定用户的MAE和RSME，即创建evaluate数据框，存放特定用户的预测评分和实际评分
def evaluate_peruser(test_id,test_ratings,train_ratings,Similarity_Mix_Max,
             user_movie_rating,test_user_movie_rating,NearestUser):
    #待预测评分的电影
    test_movies = test_ratings[test_ratings['userId'] == test_id]['movieId'].values
    #test_id这名用户已经评过的所有电影的平均评分
    mRating_testId = np.mean(train_ratings[train_ratings['userId'] == test_id]['rating'].values)
    #创建一个数据框，用于存放电影的预测评分和真实评分
    evaluate = pd.DataFrame(np.zeros((len(test_movies),3)))
    evaluate.index = test_movies
    evaluate.rename(columns={0:'predict',1:'actual',2:'diff'},inplace=True)
    #计算电影的预测评分
    for movie_id in test_movies:
        fenzi = 0
        fenmu = 0
        for near_userId in NearestUser[1:]:
            sim = Similarity_Mix_Max.loc[test_id,near_userId]
            rating = user_movie_rating.loc[near_userId,movie_id]
            mRating_nearId = np.mean(train_ratings[train_ratings['userId'] == near_userId]['rating'].values)
            if(rating == 0):rating = mRating_nearId
            fenzi = fenzi + sim*(rating - mRating_nearId)
            fenmu = fenmu + sim
        predict_rating = mRating_testId + fenzi/fenmu
        evaluate.loc[movie_id,'predict'] = predict_rating
        evaluate.loc[movie_id,'actual'] = test_user_movie_rating.loc[test_id,movie_id]
        #给evaluate插入一列，用于存放预测值和实际值的差值
        evaluate['diff'] = (evaluate['predict'] - evaluate['actual']).abs()
    return evaluate

#计算整体的MAE和RSME，即首先计算每个用户的平均MAE和RSME，然后再计算所有用户的平均MAE和RSME
def MAERMSE(test_ratings,train_ratings,c,userCommunity,Similarity_Mix_Max,
            user_movie_rating,test_user_movie_rating):
    #测试集
    test_userId = test_ratings['userId'].value_counts().sort_index().index
    #创建一个user-MAE-RMSE矩阵
    user_MAE_RMSE = pd.DataFrame(np.zeros((len(test_userId),2)))
    user_MAE_RMSE.index = test_userId
    user_MAE_RMSE.rename(columns={0:'mMAE',1:'mRMSE'},inplace=True)
    #计算每个用户的平均MAE和RSME
    for test_id in test_userId:
        #确定该test_id用户的邻近用户集合
        NearestUser = NearestUserSet(c,test_id,userCommunity,Similarity_Mix_Max)
        evaluate = evaluate_peruser(test_id,test_ratings,train_ratings,Similarity_Mix_Max,
                            user_movie_rating,test_user_movie_rating,NearestUser)
        MAE = evaluate['diff'].sum()/len(evaluate['diff'])
        RMSE = np.sqrt(np.sum(np.square(evaluate['diff']))/len(evaluate['diff']))
        user_MAE_RMSE.loc[test_id,'mMAE'] = MAE
        user_MAE_RMSE.loc[test_id,'mRMSE'] = RMSE
    #计算所有用户的平均MAE和RSME
    all_MAE = user_MAE_RMSE['mMAE'].mean()
    all_RMSE = user_MAE_RMSE['mRMSE'].mean()
    return all_MAE,all_RMSE
    

def main():
    """Run the full hybrid (interest + rating) user-based CF pipeline and
    print the overall MAE and RMSE on the held-out test split."""
    file1 = r'ml-latest-small/movies.csv'
    file2 = r'ml-latest-small/ratings.csv'
    movies, ratings = readData(file1, file2)
    train_ratings, test_ratings = divideData(ratings)
    user_movie_genre_DF = userMovieGenreData(movies, train_ratings)
    userId, InterestMax = userInterestMax(train_ratings, user_movie_genre_DF)
    # BUG FIX: Xn was previously assigned only inside the commented-out
    # k-selection experiment below, so the clustering call raised a
    # NameError. (.values also replaces the removed DataFrame.as_matrix().)
    Xn = InterestMax.values
    '''
    # K-means: pick the best k via the SSE elbow curve
    SSE = []  # sum of squared errors for each k
    for k in [2, 4, 6, 8, 10, 12, 14, 16]:
        estimator = KMeans(n_clusters=k)
        estimator.fit(Xn)
        SSE.append(estimator.inertia_)
    X = [2, 4, 6, 8, 10, 12, 14, 16]
    plt.xlabel('the num of the k')
    plt.ylabel('SSE')
    plt.plot(X, SSE, 'o-')
    plt.show()
    '''
    k = 8
    # BUG FIX: bind the result to a new name — the original assignment
    # shadowed the userCommunity() function itself.
    user_community = userCommunity(userId, InterestMax, k, Xn)
    Similarity_Interest_Max = similarityInterest(userId, InterestMax)
    user_movie_rating = userMovieRating(movies, train_ratings)
    Similarity_rating_Max = similarityRating(userId, user_movie_rating)
    interest_sim = Similarity_Interest_Max.values
    rating_sim = Similarity_rating_Max.values
    # Blend the two similarity matrices; omega weights the rating-based part.
    omega = 0.4
    mix_sim = omega * rating_sim + (1 - omega) * interest_sim
    Similarity_Mix_Max = pd.DataFrame(mix_sim, index=userId, columns=userId)

    # Test-set version of the user-movie-rating matrix (ground truth).
    test_user_movie_rating = userMovieRating(movies, test_ratings)
    c = 20  # number of nearest neighbours
    all_MAE, all_RMSE = MAERMSE(test_ratings, train_ratings, c, user_community,
                                Similarity_Mix_Max, user_movie_rating,
                                test_user_movie_rating)
    print(all_MAE, all_RMSE)

    '''
    # Grid-search the blend factor omega and the neighbour count c
    omega_chooce = [0.2, 0.4, 0.6, 0.8]
    c_chooce = [5, 10, 15, 20, 25, 30, 35]
    # One row of (omega, c, MAE, RMSE) per combination
    omega_c_MAE = pd.DataFrame(np.zeros((len(omega_chooce) * len(c_chooce), 4)))
    omega_c_MAE.rename(columns={0: 'omega', 1: 'c', 2: 'MAE', 3: 'RMSE'}, inplace=True)
    i = 0
    for omega in omega_chooce:
        for c in c_chooce:
            mix_sim = omega * rating_sim + (1 - omega) * interest_sim
            Similarity_Mix_Max = pd.DataFrame(mix_sim, index=userId, columns=userId)
            all_MAE, all_RMSE = MAERMSE(test_ratings, train_ratings, c, user_community,
                                        Similarity_Mix_Max, user_movie_rating,
                                        test_user_movie_rating)
            omega_c_MAE.loc[i, 'omega'] = omega
            omega_c_MAE.loc[i, 'c'] = c
            omega_c_MAE.loc[i, 'MAE'] = all_MAE
            omega_c_MAE.loc[i, 'RMSE'] = all_RMSE
            print(i, omega, c, all_MAE, all_RMSE)
            i += 1
    omega_c_MAE.to_excel('omega_c_MAE.xlsx', sheet_name='omega_c_MAE')
    '''

            
            
            
            
            
            
            