import pandas as pd
import numpy as nm
import random


# 整体方法说明
# 1、读入交互数据文档
# 2、从交互数据中建立两个邻接矩阵，分别是interacted和liked
#    前者是算最终结果用的，没阈值要求；后者是一开始算权重用的，有阈值要求；详见相关代码；
# 3、基于liked_graph先算出一个权重矩阵weights
# 4、在liked_graph基础上提取稀疏子图sparse_liked_graph（需要开发）
# 5、重复上边的计算，得到稀疏权重矩阵sparse_weights（需要开发）
# 6、把两个权重矩阵加起来，得到sum_weights（需要开发）
# 7、基于sum_weights得到信息扩散后的结果，保存为新的csv（需要开发）


# 包装成一个函数，叫adjacent_random_augment,输入输出都是类似liked_graph那样的邻接矩阵
# 注意函数带两个参数A和B：A是[0, 1]之间的比例，当前默认A=0.1；B是填入的权重值，当前默认B=1
# 函数的输入是liked_graph
# 函数的任务是统计liked_graph里零元素的数量，在liked_graph里选比例为A的零元素，具体数量向下取整
# 然后把这些元素的值从0变成B，得到的结果存在一个新矩阵
# 输出得到的新矩阵变量，叫random_augmented_like_graph
def adjacent_random_augment(liked_graph, A=0.1, B=1):
    """Return a copy of liked_graph with a random fraction of its zeros set to B.

    Parameters
    ----------
    liked_graph : 2-D numpy array — the item x user "liked" adjacency matrix.
    A : float in [0, 1] — fraction of zero entries to promote; the exact count
        is floor(num_zeros * A).
    B : value written into the chosen zero entries.

    Returns
    -------
    A new array; the input matrix is left untouched.

    Raises
    ------
    ValueError if A is outside [0, 1] (random.sample would otherwise fail
    with a less informative error).
    """
    if not 0 <= A <= 1:
        raise ValueError('A must be in [0, 1], got {}'.format(A))
    augmented_liked_graph = liked_graph.copy()
    zero_index = nm.argwhere(augmented_liked_graph == 0)
    sample_num = int(len(zero_index) * A)
    # random.sample picks without replacement, so each zero is promoted once.
    for row_i, col_i in random.sample(zero_index.tolist(), sample_num):
        augmented_liked_graph[row_i, col_i] = B
    return augmented_liked_graph


# 这里输入的是weights矩阵，item_num * item_num的矩阵
# 函数的工作是把矩阵上对角线的元素都改成1
# 也就是横纵坐标相同的元素值改成1
# 输出存在新的矩阵里，叫reset_weights
def reset_weight(weights):
    """Return a copy of `weights` whose main-diagonal entries are all 1."""
    result = weights.copy()
    # Set result[i, i] = 1 for every i on the main diagonal,
    # leaving the caller's matrix untouched.
    diag = nm.arange(min(result.shape))
    result[diag, diag] = 1
    return result


# Dataset selection: name and size suffix of the interactions csv to load.
data_name = 'fsl'
# data_name = 'dvd'
# data_name = 'ml'
# data_num = '1k'
# data_num = '5k'
data_num = '1w'
print('读取文件：{}-{}'.format(data_name, data_num))

# Load the raw user-item interaction records.
original_interactions = pd.read_csv('../datasets/{}/{}-{}.csv'.format(data_name, data_name, data_num))

# Count distinct users and items.
# NOTE(review): ids are used directly as matrix indices below, so this assumes
# user/item ids are contiguous integers starting at 0 — TODO confirm against data.
num_of_user = len(list(set(original_interactions['userid'])))
num_of_item = len(list(set(original_interactions['itemid'])))
print('num of user:', num_of_user)
print('num of item:', num_of_item)
# Adjacency matrix of all interactions (any interaction counts).
interacted_graph = nm.zeros([num_of_item, num_of_user])
# Adjacency matrix of "liked" interactions (rating above threshold only).
liked_graph = nm.zeros([num_of_item, num_of_user])

# Fill both matrices from the csv; no train/test split — every record goes in.
# Row index is item id, column index is user id, both 0-based.
# Positional columns: row[0] = userid, row[1] = itemid, row[2] = rating.
# liked_graph keeps only interactions whose rating reaches the threshold (3);
# note int() truncates the rating before the comparison.
for index, row in original_interactions.iterrows():
    interacted_graph[int(row[1]), int(row[0])] = 1
    if int(row[2]) >= 3.0:
        liked_graph[int(row[1]), int(row[0])] = 1

inter_nozero_count = nm.count_nonzero(interacted_graph)
liked_nozero_count = nm.count_nonzero(liked_graph)
print('interacted_graph非0元素：', inter_nozero_count)
print('liked_graph非零元素：', liked_nozero_count)

# ////////////////////////////////////////////////
# **************** Sparse-node subgraph computation ******************

# Degree of each item (number of users who liked it) and of each user
# (number of items they liked), measured on the thresholded liked_graph.
item_degree_vector = liked_graph.sum(axis=1)
user_degree_vector = liked_graph.sum(axis=0)

# Record the ids whose degree lies strictly below the median degree.
item_degree_middle = nm.median(item_degree_vector)
item_ids_under_middle = [item_id for item_id in range(num_of_item)
                         if item_degree_vector[item_id] < item_degree_middle]
user_degree_middle = nm.median(user_degree_vector)
user_ids_under_middle = [user_id for user_id in range(num_of_user)
                         if user_degree_vector[user_id] < user_degree_middle]

# Promote zero entries to D in the rows/columns of ids NOT in the under-median
# lists (row pass first, then column pass, matching the original order).
# NOTE(review): the original comment described filling ids that ARE in the
# under-median lists, but the code filled ids NOT in them; the code's behavior
# is preserved here — confirm which direction is intended.
D = 0.7
sparse_liked_graph = liked_graph.copy()
# Sets give O(1) membership tests instead of O(n) list scans per iteration.
items_under_median = set(item_ids_under_middle)
users_under_median = set(user_ids_under_middle)
for item_id in range(num_of_item):
    if item_id not in items_under_median:
        row = sparse_liked_graph[item_id, :]
        row[row == 0] = D
for user_id in range(num_of_user):
    if user_id not in users_under_median:
        col = sparse_liked_graph[:, user_id]
        col[col == 0] = D

sparse_liked_graph_nozero_count = nm.count_nonzero(sparse_liked_graph)
print('sparse_liked_graph非0元素：', sparse_liked_graph_nozero_count)

# **************** End of sparse-node subgraph computation ******************
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\


# //////////////////////////////////////////////////////
# *********** Weight computation on the randomly augmented graph ***********
random_augment_liked_graph = adjacent_random_augment(liked_graph)
random_liked_nozero_count = nm.count_nonzero(random_augment_liked_graph)
print('random_augment_liked_graph非0元素：', random_liked_nozero_count)

# Degrees counted as the number of nonzero entries per row/column; the
# augmented B-weighted edges therefore count as 1 each, unlike the sum-based
# degrees used for the sparse graph below.
ra_item_degree_vector = nm.count_nonzero(random_augment_liked_graph, axis=1).astype(float)
ra_user_degree_vector = nm.count_nonzero(random_augment_liked_graph, axis=0).astype(float)

# Number of items each user has NOT interacted with.
# NOTE(review): not used further down in this file — kept for parity.
ra_user_interact_item_num_total = nm.ones(num_of_user) * num_of_item
ra_user_not_interact_item_num = ra_user_interact_item_num_total - ra_user_degree_vector

# Replace zero degrees with a huge value so the divisions below never hit 0.
ra_item_degree_vector[ra_item_degree_vector == 0.0] = 99999
ra_user_degree_vector[ra_user_degree_vector == 0.0] = 99999

# Resource-allocation weight matrix (item x item), computed via matrix
# multiplication and broadcast division instead of per-row/column Python loops
# (same elementwise operations, so the values are identical).
ra_liked_graph_t = random_augment_liked_graph.transpose()
# Each user's outgoing mass is split by that user's degree.
ra_temp = ra_liked_graph_t / ra_user_degree_vector[:, None]
ra_temp = nm.dot(random_augment_liked_graph, ra_temp)
# Each target item's incoming mass is normalized by its degree.
ra_weights = ra_temp / ra_item_degree_vector

# End of weight computation on random_augment_liked_graph
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
ra_weights_nozero_count = nm.count_nonzero(ra_weights)
print('ra_weights非0元素：', ra_weights_nozero_count)

# ****************** Sparse-subgraph allocation matrix ******************
# Repeat the weight computation performed on random_augment_liked_graph,
# this time on sparse_liked_graph, producing sparse_weights.

# Degrees as weighted sums (the D-valued entries contribute D, not 1 —
# this differs from the count_nonzero-based degrees above).
sparse_item_degree_vector = sparse_liked_graph.sum(axis=1)
sparse_user_degree_vector = sparse_liked_graph.sum(axis=0)

# Number of items each user has NOT interacted with.
# NOTE(review): not used further down in this file — kept for parity.
sparse_user_interact_item_num_total = nm.ones(num_of_user) * num_of_item
sparse_user_not_interact_item_num = sparse_user_interact_item_num_total - sparse_user_degree_vector

# Replace zero degrees with a huge value so the divisions below never hit 0.
sparse_item_degree_vector[sparse_item_degree_vector == 0.0] = 99999
sparse_user_degree_vector[sparse_user_degree_vector == 0.0] = 99999

# Resource-allocation weight matrix (item x item) via matmul and broadcast
# division — elementwise identical to the original per-row/column loops.
sparse_liked_graph_t = sparse_liked_graph.transpose()
sparse_temp = sparse_liked_graph_t / sparse_user_degree_vector[:, None]
sparse_temp = nm.dot(sparse_liked_graph, sparse_temp)
sparse_weights = sparse_temp / sparse_item_degree_vector

sparse_weights_nozero_count = nm.count_nonzero(sparse_weights)
print('sparse_weights非0元素：', sparse_weights_nozero_count)

# Combine the two weight matrices into sum_weights and force its diagonal to 1.
sum_weights = reset_weight(ra_weights + sparse_weights)
sum_weights_nozero_count = nm.count_nonzero(sum_weights)
print('sum_weights非0元素：', sum_weights_nozero_count)
# Diffuse each user's interactions through the combined weights:
# locate[item, user] is the resource allocated to that (item, user) pair.
locate = nm.matmul(sum_weights, interacted_graph)

# Flatten to [user_id, item_id, score] rows in item-major order
# (same order as the original nested loops; the unused data_locate
# temporary and the redundant list() copy were dropped).
result = [[user_id, item_id, locate[item_id][user_id]]
          for item_id in range(len(locate))
          for user_id in range(len(locate[item_id]))]

# print(result)


# ***************处理结果保存****************
# 在输入的csv文件基础上，更新评分，得到新的csv文件
# 基于result里保存的元素值，更新csv里的评分
# 新评分 = result里的扩散得分 * C - C
# 如果得到的评分值大于5，就截断成5
# C这个参数当前取200，可以改
# 最终得到新的csv，就在原文件名基础上加augment_前缀
# 比如本算法的输入是fsl_1k.csv，那输出就叫augment_fsl_1k.csv
# Scale factor for the diffused scores: final rating = score * C - C,
# truncated at 5 from above.
# NOTE(review): there is no lower clip, so scores below 1 yield negative
# ratings, which pd.cut below maps to NaN — TODO confirm this is intended.
C = 200
df_result = pd.DataFrame(
    {'userid': [i[0] for i in result], 'itemid': [i[1] for i in result], 'pre_rating': [i[2] for i in result]})
df_original = original_interactions.copy()
# df_original.drop(['rating'],axis=1,inplace=True)
# Inner merge keeps only (user, item) pairs present in the original csv,
# attaching the predicted score as the 'pre_rating' column.
df_final = pd.DataFrame(pd.merge(df_original, df_result, on=['userid', 'itemid']))

pre_rating = df_final['pre_rating'].values.tolist()

# Rescale each score and cap the result at 5.
final_rating = [min(i * C - C, 5) for i in pre_rating]

df_final['pre_rating'] = final_rating
print(df_final)
# df_final.to_csv('file_saved/augment_{}-{}.csv'.format(data_name,data_num),index=None)

# Write the augmented dataset with the rescaled scores as the 'rating' column.
df_tomodel = df_final.copy()
df_tomodel.drop(['pre_rating'], axis=1, inplace=True)
df_tomodel['rating'] = final_rating
df_tomodel.to_csv('../datasets/{}/srms_{}-{}.csv'.format(data_name, data_name, data_num), index=None)

# Bucket the predicted ratings for a quick distribution report.
listBins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.5, 2.0, 3.0, 10000]

# Labels for the buckets above.
listLabels = ['0-01', '01-02', '02_03', '03_04', '04_05', '05_06', '06_07', '07_08', '08_09', '09_10', '10_15', '15_20',
              '20_30', '3以上']

aaa = pd.cut(df_final['pre_rating'], bins=listBins, labels=listLabels, include_lowest=True)
aaa = pd.DataFrame(aaa)
# pd.value_counts as a top-level function is deprecated (removed in pandas 3);
# call the Series method instead — same per-column counts.
print(aaa.apply(lambda col: col.value_counts()))
