import dgl
import torch
from torch import nn
from mlload import get_ml_100k
from torch.utils.data import TensorDataset, DataLoader
from minibatch import MinibatchSampler
from gcmc import rmse, GCMCRating
import tqdm
import numpy as np
import pandas as pd

np.set_printoptions(threshold=np.inf)

# Load each interaction type from its own CSV file.
all_inter = pd.read_csv('all_inter.csv')
only_collect = pd.read_csv('only_collect.csv')
only_commit = pd.read_csv('only_commit.csv')
only_score = pd.read_csv('only_score.csv')
only_duration = pd.read_csv('only_duration.csv')
# Aggregate graph source: 'interact' is 1 whenever the user and the course
# share ANY of the four interaction types above.
# See allDataProcess.py and all.csv for how it is produced.
only_all = pd.read_csv("all.csv")

# Flat per-edge value lists, row-aligned with all_inter.
user_list = all_inter['user_id'].tolist()
item_list = all_inter['course_id'].tolist()
collect_list = only_collect['collect_status'].tolist()
commit_list = only_commit['commit_status'].tolist()
duration_list = only_duration['duration'].tolist()
score_list = only_score['score'].tolist()
all_list = only_all['interact'].tolist()  # aggregate-graph edge values

# Tensor views of the edge lists, used both as graph endpoints and labels.
user_ids = torch.LongTensor(user_list)
item_ids = torch.LongTensor(item_list)
collect_ids = torch.LongTensor(collect_list)
commit_ids = torch.LongTensor(commit_list)
duration_ids = torch.LongTensor(duration_list)
score_ids = torch.LongTensor(score_list)
all_ids = torch.LongTensor(all_list)  # aggregate graph

# Dense user x item pairing for the fully connected graph: every trained
# user paired with every trained item (items limited to those seen in
# training).  Each user is repeated once per item; the item list is tiled
# once per user.
_unique_users = list(set(user_ids.tolist()))
_unique_items = list(set(item_ids.tolist()))
all_user_ids = [u for u in _unique_users for _ in _unique_items]
all_item_ids = _unique_items * len(_unique_users)

# Course genre features: columns 1..24 of the header-less one-hot file.
onehot = pd.read_csv('onehotdata.csv', header=None)
item_genres = onehot[list(range(1, 25))].values
num_classify = item_genres.shape[1]
print("课程类别数：", num_classify)


def run():
    """Train five GCMC models and write ranked course recommendations.

    One bipartite user/item graph is built per interaction type (score,
    collect, commit, duration) plus an aggregate graph whose 0/1 edges
    mark whether ANY interaction exists.  A GCMC model is trained per
    graph; the fused user embedding is the mean over the four behaviour
    models while the item embedding comes from the aggregate model.
    Finally every unseen (user, item) pair is scored on the complement of
    the aggregate graph and the per-user descending ranking is saved to
    DGLresult.csv.
    """

    def _build_graph(rating_values):
        # Bipartite user<->item heterograph carrying `rating_values` on
        # both edge directions, with one-hot genres as item features.
        g = dgl.heterograph({
            ('user', 'watched', 'item'): (user_ids, item_ids),
            ('item', 'watchedby', 'user'): (item_ids, user_ids)
        })
        # The full canonical edge type is used because the short edge-type
        # name raised an error in this DGL version (original author's note).
        g.edges[('item', 'watchedby', 'user')].data['rating'] = torch.LongTensor(rating_values)
        g.edges[('user', 'watched', 'item')].data['rating'] = torch.LongTensor(rating_values)
        g.nodes['item'].data['classify'] = torch.FloatTensor(item_genres)
        return g

    # One graph per interaction type plus the aggregate ("all") graph.
    graph_score = _build_graph(score_ids)
    graph_collect = _build_graph(collect_ids)
    graph_commit = _build_graph(commit_ids)
    graph_duration = _build_graph(duration_ids)
    graph_all = _build_graph(all_ids)

    # Training set: one row per observed edge, all label tensors aligned.
    train_dataset = TensorDataset(user_ids, item_ids, score_ids, commit_ids, collect_ids, duration_ids, all_ids)

    # Hyper-parameters — a configuration that worked well in practice.
    NUM_LAYERS = 1
    BATCH_SIZE = 1000
    NUM_EPOCHS = 1
    HIDDEN_DIMS = 16

    # NOTE(review): the sampler receives (score, commit, collect, duration,
    # all) but the loop below reads blocks[1] as "collect" and blocks[2] as
    # "commit" — verify against MinibatchSampler.sample that the emitted
    # block order really is (score, collect, commit, duration, all).
    sampler = MinibatchSampler(graph_score, graph_commit, graph_collect, graph_duration, graph_all, NUM_LAYERS)

    # Mini-batch loader; the sampler collates each batch into paired
    # graphs and message-passing blocks.
    train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, collate_fn=sampler.sample, shuffle=True)

    # One GCMC model per graph.  The 4th argument is the number of
    # distinct edge values: score is 0-5 (6 classes), the others are 0/1.
    model_score = GCMCRating(graph_score.number_of_nodes('user'), graph_score.number_of_nodes('item'), HIDDEN_DIMS, 6,
                             NUM_LAYERS, num_classify)
    model_commit = GCMCRating(graph_commit.number_of_nodes('user'), graph_commit.number_of_nodes('item'), HIDDEN_DIMS,
                              2, NUM_LAYERS, num_classify)
    model_collect = GCMCRating(graph_collect.number_of_nodes('user'), graph_collect.number_of_nodes('item'),
                               HIDDEN_DIMS, 2, NUM_LAYERS, num_classify)
    model_duration = GCMCRating(graph_duration.number_of_nodes('user'), graph_duration.number_of_nodes('item'),
                                HIDDEN_DIMS, 2, NUM_LAYERS, num_classify)
    # Fixed: the item count was previously taken from graph_duration
    # (copy-paste slip).  All graphs share the same node sets so the value
    # is identical, but graph_all is the correct source.
    model_all = GCMCRating(graph_all.number_of_nodes('user'), graph_all.number_of_nodes('item'),
                           HIDDEN_DIMS, 2, NUM_LAYERS, num_classify)

    # Adam with an explicit learning rate and L2 weight decay.
    learning_rate = 0.001
    l2_decay = 0.01
    opt_score = torch.optim.Adam(model_score.parameters(), lr=learning_rate, weight_decay=l2_decay)
    opt_commit = torch.optim.Adam(model_commit.parameters(), lr=learning_rate, weight_decay=l2_decay)
    opt_collect = torch.optim.Adam(model_collect.parameters(), lr=learning_rate, weight_decay=l2_decay)
    opt_duration = torch.optim.Adam(model_duration.parameters(), lr=learning_rate, weight_decay=l2_decay)
    opt_all = torch.optim.Adam(model_all.parameters(), lr=learning_rate, weight_decay=l2_decay)

    # ***************** training *****************
    for _ in range(NUM_EPOCHS):
        # All five models are trained together.
        model_score.train()
        model_commit.train()
        model_collect.train()
        model_duration.train()
        model_all.train()
        # tqdm progress bar over the mini-batches.
        with tqdm.tqdm(train_dataloader) as t:
            predictions = []
            for pair_graph, blocks in t:
                # Per-behaviour embeddings from each model's block.
                user_emb_score, item_emb_score = model_score(blocks[0])
                user_emb_collect, item_emb_collect = model_collect(blocks[1])
                user_emb_commit, item_emb_commit = model_commit(blocks[2])
                user_emb_duration, item_emb_duration = model_duration(blocks[3])
                user_emb_all, item_emb_all = model_all(blocks[4])

                # User vector: mean of the four behaviour embeddings.
                user_emb = torch.mean(torch.stack([user_emb_collect, user_emb_score, user_emb_commit, user_emb_duration]), 0)

                # Item vector: taken from the aggregate model.
                item_emb = item_emb_all

                # Predict on the aggregate pair graph (pair_graph[4]) for
                # consistency, combining the fused user vector with the
                # aggregate item vector.
                prediction = model_all.compute_score(pair_graph[4], user_emb, item_emb)
                # RMSE against the aggregate 0/1 edge labels.  The target
                # is effectively binary; cross-entropy would also fit.
                loss = ((prediction - pair_graph[4].edata['rating']) ** 2).mean().sqrt()

                # NOTE(review): calling requires_grad_() on the loss hints
                # that the graph may already be detached upstream; if so,
                # backward() cannot reach the model parameters.  Confirm
                # gradients actually flow before trusting this training.
                loss = loss.requires_grad_()
                opt_score.zero_grad()
                opt_commit.zero_grad()
                opt_collect.zero_grad()
                opt_duration.zero_grad()
                opt_all.zero_grad()

                # Backpropagate once and step all five optimizers.
                loss.backward()
                opt_score.step()
                opt_collect.step()
                opt_commit.step()
                opt_duration.step()
                opt_all.step()

                # Show the running loss on the progress bar.
                t.set_postfix({'loss': '%.4f' % loss.item()}, refresh=False)

                # Collected for the (currently disabled) export below.
                predictions.append(prediction)
            predictions = torch.cat(predictions, 0)

        # End-of-epoch: switch to eval mode so dropout layers are disabled
        # for the prediction phase below.
        model_score.eval()
        model_commit.eval()
        model_duration.eval()
        model_collect.eval()
        model_all.eval()

        # Optional debug export of per-epoch predictions:
        # plist = sum(predictions.tolist(), [])
        # only_all['pre_score'] = plist
        # print(only_all)
        # result = only_all.groupby('user_id').apply(lambda x: x.sort_values(by="interact", ascending=False)).reset_index(
        #     drop=True)
        # result.to_csv('result.csv', index=None)

    # ***************** prediction *****************
    # Score every (user, item) pair WITHOUT an observed interaction:
    # build the complement of the aggregate graph (drop observed edges,
    # keep the rest) and rank its edges per user by predicted value.

    # Fully connected user-item graph over all trained node ids.
    full_graph = dgl.heterograph({('user', 'watched', 'item'): (all_user_ids, all_item_ids)})
    # Store the endpoint pair on each edge so it survives sub-graphing
    # and edge removal below.
    real_data = torch.tensor(list(zip(all_user_ids, all_item_ids)), dtype=torch.int)
    full_graph.edata['real_data'] = real_data
    # Keep only nodes that actually appeared in training.
    seeds = {'user': list(set(user_ids.tolist())),
             'item': list(set(item_ids.tolist()))}

    sampled_graph = full_graph.in_subgraph(seeds)

    # Remove every observed edge, leaving the complement graph.
    _, _, edges_to_remove = sampled_graph.edge_ids(
        user_ids, item_ids, etype=('user', 'watched', 'item'), return_uv=True)
    comp_graph = dgl.remove_edges(sampled_graph, edges_to_remove, ('user', 'watched', 'item'))

    # Wrap the aggregate graph as a single message-passing block and
    # extract the final embeddings from the trained aggregate model.
    all_blocks = [dgl.to_block(graph_all)]
    user_emb, item_emb = model_all(all_blocks)
    # Predicted recommendation value for every complement edge.
    prediction = model_all.compute_score(comp_graph, user_emb, item_emb)

    # Recover the (user_id, item_id) endpoints of the scored edges.
    real_data = comp_graph.edata['real_data']
    real_data = pd.DataFrame(real_data.tolist())
    real_data.columns = ['user_id', 'item_id']

    result = real_data
    # Flatten the nested per-edge predictions to a 1-D array.
    predictions = np.ravel(prediction.tolist())

    # Attach predictions and rank per user by descending value.
    result['rating'] = predictions
    result = result.groupby('user_id').apply(lambda x: x.sort_values(by="rating", ascending=False)).reset_index(
        drop=True)

    # Map the internal contiguous ids back to the original ids.
    user_dict = np.load('user_dict.npy', allow_pickle=True).item()
    item_dict = np.load('item_dict.npy', allow_pickle=True).item()
    result['user_id'] = [user_dict[i] for i in result['user_id'].tolist()]
    result['item_id'] = [item_dict[i] for i in result['item_id'].tolist()]

    result.to_csv('DGLresult.csv', index=None)


# Entry point: run training only when executed as a script, so importing
# this module for its graphs/tensors does not trigger a full training run.
if __name__ == "__main__":
    run()


# score_emb_user,score_emb_item = score_emb()
# emb_user_score, emb_item_score, emb_user_collect, emb_item_collect, emb_user_commit, emb_item_commit, emb_user_duration, emb_item_duration = duration_emb()
# collect_emb_user,collect_emb_item = collect_emb()
# commit_emb_user, commit_emb_item = commit_emb()

# print('score的len',len(emb_user_score[0]),len(emb_item_score))
# print('duration的len',len(emb_user_duration),len(emb_item_duration))
# print('collect的len',len(emb_user_collect),len(emb_item_collect))
# print('commit的len',len(emb_user_commit),len(emb_item_commit))
# print('commit的len',emb_user_commit,emb_item_commit)
