#!/usr/bin/env python
# coding: utf-8

# In[103]:


import numpy as np
import pandas as pd
import scipy.stats as st
import seaborn as sns
sns.set_style('whitegrid')
import matplotlib.pyplot as plt 


# In[104]:


# Load the raw Steam dataset: one row per (user, game, action) event.
stm = pd.read_csv(r"D:\Desk\steam.csv")
stm  # notebook cell output: preview the raw data


# In[105]:


# Goal: neutralize games that were purchased but never played.
# (Earlier attempt kept only 'play' rows:)
#stm_play = stm.loc[stm['is_played'] == 'play']
#stm_play
stm.loc[stm['is_played']=='purchase','hours'] = 0.0
stm#set hours to 0 for purchase-only rows so they later score 0


# In[106]:


alaytime1 = stm.iloc[:,[0,1,4]]#select columns by position — presumably user_id, game_id, hours; verify against the CSV header
alaytime1


# In[107]:


alaytime1=alaytime1.drop_duplicates(subset=['user_id', 'game_id'], keep='last')#删除重复的purchase，但是只买不玩游戏的不删，直接是0评分


# In[108]:


alaytime2 = stm.iloc[:,[0,4]]


# In[109]:


alaytime3=alaytime2.groupby(['user_id']).sum()#total hours per user across all games
alaytime3.rename(columns={'hours':'allhours'},inplace=True)
alaytime3


# In[110]:


neww=pd.merge(alaytime1,alaytime3,left_on='user_id',right_on='user_id')#join per-game hours with each user's total hours
neww.head(10)


# In[111]:


# Raw score: fraction of the user's total play time spent on this game, scaled to 0-10.
neww['new_value'] = 10*neww['hours'] /neww['allhours'] 
neww.fillna(0, inplace=True)  # 0/0 for users with allhours == 0 produces NaN; treat as 0
neww.head(30)


# In[113]:


neww.to_csv(r"D:\Desk\otherdata.csv")
# Distinct users and games, used by the per-user scaling loop below.
userids=stm.user_id.unique()
gameids=stm.game_id.unique()


# In[114]:


from sklearn.preprocessing import MinMaxScaler

# Rescale each user's scores independently to the 0-10 range.
# (Removed a duplicate `import seaborn as sns`; seaborn is already
# imported at the top of the file.)
scaler = MinMaxScaler(feature_range=(0, 10))

all_user_minmax = []
for uid in userids:  # renamed from `id`, which shadowed the builtin
    user_rows = neww.loc[neww['user_id'] == uid]
    #print(user_rows)
    # Per-user min-max scaling of the raw ratio score.
    # NOTE(review): a user with a single game (min == max) is mapped to the
    # lower bound 0 by MinMaxScaler — confirm that is the intended rating.
    user_scaled = scaler.fit_transform(np.array(user_rows['new_value']).reshape(-1, 1))
    neww.loc[neww['user_id'] == uid, 'new_value'] = user_scaled
    all_user_minmax.append(user_scaled)
#all_user_minmax


# In[115]:


# Overwrite the CSV with the per-user min-max scaled scores.
neww.to_csv(r"D:\Desk\otherdata.csv")
#len(all_user_minmax)
neww


# In[116]:


df = None
def dataSet2Matrix(filename):
    """Load the ratings CSV and build the global User-Item matrix.

    :param filename: path to a CSV containing user_id, game_id and hours columns
    :return: the pivoted DataFrame (also stored in the module-global ``df``)
    """
    # Read the comma-separated ratings file.
    data = pd.read_csv(filename)
    global df
    # Pivot into a User-Item matrix: rows = users, columns = games.
    # NOTE(review): the matrix is filled with raw 'hours', not the scaled
    # 'new_value' rating computed earlier — confirm which was intended.
    df = data.pivot(index='user_id', columns='game_id', values='hours')
    return df
    
# Euclidean distance
def euclidean(user_id1, user_id2):
    """Return the Euclidean distance between two users' hour vectors.

    Only games rated (non-null) by BOTH users are compared.
    """
    x, y = build_xy(user_id1, user_id2)
    # The original try/except ZeroDivisionError was unreachable: there is no
    # division here. An empty overlap yields sum([]) == 0 -> distance 0.0.
    return sum((x - y) ** 2) ** 0.5
# Pearson correlation coefficient
def pearson(user_id1, user_id2):
    """Return the Pearson correlation of two users' hour vectors.

    Computed over games rated by both users; returns 0 when either
    vector has zero variance (correlation undefined).
    """
    x, y = build_xy(user_id1, user_id2)
    mean1, mean2 = x.mean(), y.mean()
    # denominator = product of the two standard-deviation terms
    denominator = (sum((x - mean1) ** 2) * sum((y - mean2) ** 2)) ** 0.5
    # Bug fix: dividing a float by 0.0 yields nan/inf, NOT ZeroDivisionError,
    # so the original except clause never fired. Test the denominator instead.
    if denominator == 0:
        return 0
    return sum((x - mean1) * (y - mean2)) / denominator

# Cosine similarity
def cosine(user_id1, user_id2):
    """Return the cosine similarity of two users' hour vectors.

    Computed over games rated by both users; returns 0 when either
    vector is all zeros (similarity undefined).
    """
    x, y = build_xy(user_id1, user_id2)
    # denominator = product of the two vector norms
    denominator = (sum(x * x) * sum(y * y)) ** 0.5
    # Bug fix: dividing a float by 0.0 yields nan/inf, NOT ZeroDivisionError,
    # so the original except clause never fired. Test the denominator instead.
    if denominator == 0:
        return 0
    return sum(x * y) / denominator

# Restrict two users' rating rows to the games both have played
def build_xy(user_id1, user_id2):
    """Return the two users' hour vectors over their commonly rated games."""
    row1 = df.loc[user_id1]
    row2 = df.loc[user_id2]
    shared = row1.notnull() & row2.notnull()
    return row1[shared], row2[shared]

# Dispatch table: metric name -> similarity/distance function.
metric_funcs = {
    'euclidean': euclidean,
    'pearson': pearson,
    'cosine': cosine
}


# Compute the nearest neighbours of a user
def computeNearestNeighbor(user_id, metric='pearson', k=3):
    """Return the k users most similar to ``user_id``.

    :param user_id: row label of the target user in the global ``df``
    :param metric: one of the keys of ``metric_funcs``
    :param k: number of neighbours to return
    :return: pd.Series — index is neighbour user_id, values are scores
    :raises ValueError: if ``metric`` is not a supported measure

    Bug fix: the original also listed 'manhattan', which is absent from
    ``metric_funcs`` and raised KeyError, and silently returned None for
    any other metric name.
    """
    if metric not in metric_funcs:
        raise ValueError('unsupported metric: %s' % metric)
    # Score every other user against the target user.
    scores = df.drop(user_id).index.to_series().apply(metric_funcs[metric], args=(user_id,))
    # Euclidean is a distance (smaller = closer); the others are
    # similarities (larger = closer).
    if metric == 'euclidean':
        return scores.nsmallest(k)
    return scores.nlargest(k)
    
# Recommend games to the given user (returns a pd.Series)
def recommend(user_id):
    """Recommend games for ``user_id`` from their most similar user (cosine)."""
    # Most similar user by cosine similarity.
    neighbors = computeNearestNeighbor(user_id, metric='cosine')
    nearest_user_id = neighbors.index[0]
    print('最近邻用户id：', nearest_user_id)
    # Games the neighbour has rated but the target user has not.
    # Result: index is the game id, values are the neighbour's hours.
    unseen = df.loc[user_id].isnull() & df.loc[nearest_user_id].notnull()
    return df.loc[nearest_user_id, unseen].sort_values()


# In[117]:


# Build the global User-Item matrix from the saved ratings file.
dataSet2Matrix(r"D:\Desk\otherdata.csv")
df  # notebook cell output: preview the matrix


# In[ ]:





# In[121]:


print(build_xy(3,32))


# In[124]:


# Similarity between the two users
print(pearson(3,32))


# In[125]:


# Most similar users to user 3
print(computeNearestNeighbor(3))


# In[ ]:


# Predicted recommendations for user 3
recommend(3)


# In[ ]:




