#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Import required libraries
import os, sys
import time
import datetime
import pandas as pd
import numpy as np
import tensorflow as tf

# Model hyper-parameters
user_dense_size = 46            # dimension of the user dense-feature vector
user_sparse_size = 53           # dimension of the user sparse-feature vector (= sex + age + client + tag, see tf.split below)
user_sex_size = 3               # width of the user sex field
user_age_size = 7               # width of the user age field
user_client_size = 5            # width of the user client field
user_tag_size = 38              # width of the user tag field
user_sparse_embedding_size = 20 # embedding dimension for each sparse user field
max_seq_len = 20                # maximum (padded) behaviour-sequence length
all_song_size = 300000          # total number of songs
sim_vec_size = 128              # dimension of the tower-output / similarity vectors
dnn_layer_nums = [516, 218, 128]# units per DNN layer; the last must equal sim_vec_size (user/song matmul in prediction)
batch_size = 512                # training batch size
learning_rate = 0.001           # learning rate
top_k = 50                      # take top-K recommendation results
train_epoch = 5                 # number of training epochs


def seq_avg(seq_inputs, seq_len, max_len, node_name):
    """Masked average pooling over a padded embedding sequence.

    Builds a softmax weighting that is uniform across the valid positions:
    valid positions get identical logits (1/max_len) while padded positions
    get a large negative logit, so after softmax the padding receives ~zero
    weight and the valid steps are averaged.

    Args:
      seq_inputs: float tensor of sequence embeddings; assumed
        [batch, max_len, dim] -- TODO confirm against callers.
      seq_len: int tensor with the true length of each sequence.
      max_len: python int, the padded sequence length.
      node_name: name assigned to the output tensor.

    Returns:
      A [batch, dim] tensor holding the mean of the valid sequence steps.
    """
    emb_dim = seq_inputs.get_shape().as_list()[-1]

    flat_len = tf.reshape(seq_len, [-1])
    valid = tf.expand_dims(tf.sequence_mask(flat_len, max_len), axis=-2)  # [B,1,max_len]

    uniform_logits = tf.ones_like(valid, dtype="float32") * (1.0 / max_len)
    masked_logits = tf.ones_like(valid, dtype="float32") * (-2 ** 16 + 1)
    weights = tf.nn.softmax(tf.where(valid, uniform_logits, masked_logits))  # [B,1,max_len]

    pooled = tf.matmul(weights, seq_inputs)  # [B,1,dim]
    return tf.reshape(pooled, [-1, emb_dim], name=node_name)


def train():
    """Build and train the two-tower (YouTube-DNN style) retrieval model.

    The TF1 graph is assembled from placeholders. Several node names
    ("*_pl", "forward/user_embedding", "prediction/hit_num",
    "prediction/score") are looked up by name in test(), so they must not
    be renamed. Training data is read from a tab-separated CSV and a
    checkpoint is saved every 10 global steps.
    """
    tf.reset_default_graph()
    tf_config = tf.ConfigProto()
    # tf_config.gpu_options.allow_growth = True
    # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # ---------- placeholders ----------
    user_dense_pl = tf.placeholder(tf.float32, shape=[None, user_dense_size], name="user_dense_pl")      # user dense features
    user_sparse_pl = tf.placeholder(tf.float32, shape=[None, user_sparse_size], name="user_sparse_pl")   # user sparse (multi-hot) features
    end_seq_idx_pl = tf.placeholder(tf.int64, shape=[None, max_seq_len], name="end_seq_idx_pl")          # "end" behaviour-sequence song indices
    end_seq_len_pl = tf.placeholder(tf.int64, shape=[None, 1], name="end_seq_len_pl")                    # true length of the "end" sequence
    sub_seq_idx_pl = tf.placeholder(tf.int64, shape=[None, max_seq_len], name="sub_seq_idx_pl")          # "sub" behaviour-sequence song indices
    sub_seq_len_pl = tf.placeholder(tf.int64, shape=[None, 1], name="sub_seq_len_pl")                    # true length of the "sub" sequence
    songIdx_pl = tf.placeholder(tf.int64, shape=[None, 1], name="songIdx_pl")                            # target song index (label)

    # ---------- build the model ----------
    # Item tower: embedding tables, defined in the "embedding" name scope.
    with tf.name_scope("embedding"):
        # Row 0 is a frozen all-zero vector used for padding; the feed code
        # below shifts sequence song indices by +1 so real songs start at row 1.
        default_embedding = tf.Variable(
            initial_value=np.zeros([1, sim_vec_size]),
            dtype="float32",
            trainable=False
        )
        # Embedding matrix for songs appearing in behaviour sequences.
        end2end_embeddings_tmp = tf.get_variable(
            shape=[all_song_size, sim_vec_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="end2end_tmp"
        )
        end2end_embeddings = tf.concat([default_embedding, end2end_embeddings_tmp], axis=0, name="end2end")

        # Embedding matrix for target songs (the sampled-softmax weights).
        song_embeddings = tf.get_variable(
            shape=[all_song_size, sim_vec_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="target"
        )
        # User-feature embedding matrices.
        user_sex_embedding = tf.get_variable(
            shape=[user_sex_size, user_sparse_embedding_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="user_sex"
        )
        user_age_embedding = tf.get_variable(
            shape=[user_age_size, user_sparse_embedding_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="user_age"
        )
        user_client_embedding = tf.get_variable(
            shape=[user_client_size, user_sparse_embedding_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="user_client"
        )
        user_tag_embedding = tf.get_variable(
            shape=[user_tag_size, user_sparse_embedding_size],
            initializer=tf.random_uniform_initializer(),
            dtype="float32",
            name="user_tag"
        )

    with tf.name_scope("song"):
        # Look up sequence embeddings and average each sequence (masked mean).
        end_seq_emb = tf.nn.embedding_lookup(end2end_embeddings, end_seq_idx_pl, name="end_seq_embedding")
        sub_seq_emb = tf.nn.embedding_lookup(end2end_embeddings, sub_seq_idx_pl, name="sub_seq_embedding")
        end_seq_avg = seq_avg(end_seq_emb, end_seq_len_pl, max_seq_len, node_name="end_seq_avg")
        sub_seq_avg = seq_avg(sub_seq_emb, sub_seq_len_pl, max_seq_len, node_name="sub_seq_avg")

    # User tower: built in the "user" and "forward" name scopes.
    with tf.name_scope("user"):
        # Split the multi-hot user vector into its four fields.
        user_sex, user_age, user_client, user_tag = tf.split(
            user_sparse_pl, [user_sex_size, user_age_size, user_client_size, user_tag_size], axis=1
        )

        # Project each field through its embedding matrix
        # (matmul of a multi-hot row == sum of the hot rows).
        user_sex_embed = tf.matmul(user_sex, user_sex_embedding)
        user_age_embed = tf.matmul(user_age, user_age_embedding)
        user_client_embed = tf.matmul(user_client, user_client_embedding)
        user_tag_embed = tf.matmul(user_tag, user_tag_embedding)

        # Concatenate the sparse-feature embeddings.
        user_sparse_feature = tf.concat(
            [user_sex_embed, user_age_embed, user_client_embed, user_tag_embed],
            axis=-1, name="sparse_feature"
        )

    with tf.name_scope("forward"):
        # Concatenate every feature group as the DNN input.
        dnn_input = tf.concat(
            [end_seq_avg, sub_seq_avg, user_sparse_feature, user_dense_pl],
            axis=-1, name="sparse_feature"
        )
        # Feed-forward tower; the last layer width must equal sim_vec_size.
        dnn_layer = dnn_input
        for layer_num in dnn_layer_nums:
            dnn_layer = tf.layers.dense(inputs=dnn_layer, units=layer_num, activation=tf.nn.relu)
        user_embedding = tf.identity(dnn_layer, "user_embedding")  # final user embedding

    # Training objective: sampled softmax over all songs, in the "loss" scope.
    with tf.name_scope("loss"):
        # Frozen zero biases — the model scores songs purely by dot product.
        zero_bias = tf.Variable(tf.zeros([all_song_size]), trainable=False)
        last_loss = tf.reduce_mean(
            tf.nn.sampled_softmax_loss(
                weights=song_embeddings,
                biases=zero_bias,
                labels=songIdx_pl,
                inputs=user_embedding,
                num_sampled=5000,  # number of negative samples
                num_classes=all_song_size
            )
        )

        # Optimizer.
        opt = tf.train.MomentumOptimizer(learning_rate, 0.95)
        train_op = opt.minimize(last_loss)

        global_steps = tf.Variable(0, trainable=False)
        step_on = global_steps.assign_add(1)

    # Two-tower interaction (full scoring, used for evaluation).
    with tf.name_scope("prediction"):
        # Similarity between the user embedding and every song.
        prediction = tf.matmul(user_embedding, song_embeddings, transpose_b=True)
        # Top-K recommendations.
        top_k_index = tf.nn.top_k(prediction, k=top_k).indices
        # Hit count; use the top_k constant instead of a hard-coded 50 so the
        # two evaluation paths cannot drift apart.
        hit = tf.reduce_sum(
            tf.cast(
                tf.nn.in_top_k(
                    targets=tf.reshape(songIdx_pl, [-1]),
                    predictions=prediction,
                    k=top_k
                ),
                "float32"
            ), name="hit_num"
        )
        # Dot-product score of the target song.
        target_song_vector = tf.reshape(tf.nn.embedding_lookup(song_embeddings, songIdx_pl), [-1, sim_vec_size])
        score = tf.reduce_sum(tf.multiply(user_embedding, target_song_vector), axis=-1, name="score")  # [B]

    # ---------- training ----------

    # Checkpoint directory.
    ckpt_dir = r'D:/NetEaseCloudClass/data/checkpoint'
    print("ckpt_dir: " + ckpt_dir)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    with tf.Session(config=tf_config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=3)  # keep the 3 most recent checkpoints

        t1 = datetime.datetime.now()
        # Load the training data.
        data_file = r'D:/NetEaseCloudClass/data/train.csv'
        df = pd.read_csv(data_file, delimiter='\t')

        # Train for multiple epochs.
        for epoch in range(1, train_epoch + 1):
            print("--------------------------------- begin:epoch_{:d} ---------------------------------".format(epoch))
            # Shuffle the data.
            df2 = df.sample(frac=1).reset_index(drop=True)
            # Ceiling division. The previous int(len/batch)+1 produced an
            # extra EMPTY batch (and a divide-by-zero in the accuracy below)
            # whenever len(df2) was an exact multiple of batch_size.
            max_step = (len(df2) + batch_size - 1) // batch_size
            print(max_step)

            # Mini-batch training.
            for s in range(max_step):
                # Slice out the current batch.
                train_batch = df2.iloc[s * batch_size:min((s + 1) * batch_size, len(df2))]
                idx = [[x] for x in train_batch["songIdx"].tolist()]
                run_op_list = [last_loss, user_embedding, step_on, train_op, top_k_index]

                # Build the feed dict. Sequence song indices are shifted by +1
                # because row 0 of end2end_embeddings is the padding vector.
                feed_dict = {
                    user_dense_pl: [[float(y) for y in x.split(",")] for x in train_batch["userDenseF"].tolist()],
                    user_sparse_pl: [[float(y) for y in x.split(",")] for x in train_batch["userSparseF"].tolist()],
                    end_seq_idx_pl: [[int(y) + 1 for y in x.split(",")] for x in
                                     train_batch["endSongSeqIdx"].tolist()],
                    end_seq_len_pl: [[x] for x in train_batch["endSeqLen"].tolist()],
                    sub_seq_idx_pl: [[int(y) + 1 for y in x.split(",")] for x in
                                     train_batch["subSongSeqIdx"].tolist()],
                    sub_seq_len_pl: [[x] for x in train_batch["subSeqLen"].tolist()],
                    songIdx_pl: idx
                }

                # Run one training step.
                out_list = sess.run(run_op_list, feed_dict=feed_dict)

                t2 = datetime.datetime.now()
                t = (t2 - t1).total_seconds()

                # Compute the in-batch top-K hit rate.
                loss, user_embedding_final, step, _, top_k_final = out_list
                hit_num = 0
                for i in range(len(idx)):
                    if idx[i][0] in top_k_final[i]:
                        hit_num += 1
                accuracy = hit_num * 1.0 / len(idx)

                # Progress log.
                print("Train> iter={:d}, loss={:4f}, idx={:d}, accuracy={:f}, time={:f}s".format(
                    step, loss, idx[0][0], accuracy, t))
                sys.stdout.flush()
                t1 = t2

                # Periodically save a checkpoint (out_list was already
                # unpacked above; no need to unpack it a second time).
                if step % 10 == 0:
                    print("vector: user=")
                    print(user_embedding_final[0])

                    cpStep = step
                    print(">>> save step_{0}, {1} <<<".format(cpStep, datetime.datetime.now()))
                    sys.stdout.flush()
                    saver.save(sess, ckpt_dir + "/YoutubeDnn", cpStep)

            # End-of-epoch banner. Previously this sat outside the epoch loop
            # and only reported the final epoch.
            print("--------------------------------- end:epoch_{:d} ---------------------------------".format(epoch))


def test(test_step):
    """Evaluate a saved checkpoint on the test set.

    Restores the graph and weights saved by train() and reports the
    overall top-K hit rate.

    Args:
      test_step: global step of the checkpoint to restore
        (file YoutubeDnn-<test_step> under the training checkpoint dir).
    """
    # Load the test data.
    data_file = r'D:/NetEaseCloudClass/data/test.csv'
    df = pd.read_csv(data_file, delimiter='\t')

    # Import the saved graph. This path must match the directory train()
    # saves to; the previous path was missing the /data/ segment and never
    # pointed at the checkpoints actually written by train().
    graph = tf.get_default_graph()
    with graph.as_default():
        model_file = r'D:/NetEaseCloudClass/data/checkpoint/YoutubeDnn-' + str(test_step)
        saver = tf.train.import_meta_graph(model_file + '.meta')

    with tf.Session(graph=graph) as sess:
        # Restore the model parameters.
        saver.restore(sess=sess, save_path=model_file)

        # Fetch the evaluation nodes by name (defined in train()).
        user_embedding_node = graph.get_tensor_by_name("forward/user_embedding:0")
        hit_node = graph.get_tensor_by_name("prediction/hit_num:0")
        score_node = graph.get_tensor_by_name("prediction/score:0")

        print("-------------------------------------")
        all_hit_num = 0.
        all_sample_num = 0.
        t1 = datetime.datetime.now()

        # Evaluate in mini-batches.
        batch_size = 100
        # Ceiling division, and use batch_size rather than a hard-coded 100.
        num = (len(df) + batch_size - 1) // batch_size
        for s in range(num):
            # Slice out the current batch.
            test_batch = df.iloc[s * batch_size: min((s + 1) * batch_size, len(df))]
            idx = [[x] for x in test_batch["songIdx"].tolist()]

            # Build the feed dict (sequence indices shifted by +1 for the
            # padding row, matching train()).
            feed_dict = {
                "user_dense_pl:0": [[float(y) for y in x.split(",")] for x in test_batch["userDenseF"].tolist()],
                "user_sparse_pl:0": [[float(y) for y in x.split(",")] for x in test_batch["userSparseF"].tolist()],
                "end_seq_idx_pl:0": [[int(y) + 1 for y in x.split(",")] for x in test_batch["endSongSeqIdx"].tolist()],
                "end_seq_len_pl:0": [[x] for x in test_batch["endSeqLen"].tolist()],
                "sub_seq_idx_pl:0": [[int(y) + 1 for y in x.split(",")] for x in test_batch["subSongSeqIdx"].tolist()],
                "sub_seq_len_pl:0": [[x] for x in test_batch["subSeqLen"].tolist()],
                "songIdx_pl:0": idx
            }

            # Run the evaluation nodes.
            user_embedding, batch_hit, batch_score = sess.run(
                [user_embedding_node, hit_node, score_node],
                feed_dict=feed_dict
            )
            # Use a dedicated name for the actual batch length. The old code
            # re-bound batch_size here, silently changing the slicing stride
            # used at the top of the loop.
            n_samples = len(user_embedding)
            all_hit_num += batch_hit
            all_sample_num += n_samples

            # Periodic progress report.
            if s % 10 == 0:
                t2 = datetime.datetime.now()
                t = (t2 - t1).total_seconds() / 10.0
                accuracy = batch_hit / n_samples
                print("Test> iter={:d}, accuracy={:4f}, time={:3f}s".format(s, accuracy, t))
                print(user_embedding[0])
                sys.stdout.flush()
                t1 = t2

        # Overall top-K hit rate.
        Accuracy = all_hit_num / all_sample_num
        print("Accuracy: {:6f}".format(Accuracy))


if __name__ == '__main__':
    # Record the wall-clock start time (whole seconds).
    start_time = time.mktime(time.localtime())

    # Run training.
    train()
    # test(10)  # uncomment to evaluate a saved checkpoint

    # Report total elapsed time. Compute the difference once and use divmod,
    # so seconds print as an integer instead of a float like "5.0".
    end_time = time.mktime(time.localtime())
    elapsed = int(end_time - start_time)
    hours, rem = divmod(elapsed, 3600)
    minutes, seconds = divmod(rem, 60)
    print("Finished, time consumed: %s hours %s minutes %s seconds." % (hours, minutes, seconds))
