import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import warnings
warnings.filterwarnings('ignore')

import numpy as np
import pandas as pd
import tensorflow as tf
from model_zoo.tf_nn_model import TfNN, NNModel
from model_zoo.losses import log_loss

rootpath = "./"

# Load the three raw tables. FIX: every path is now rooted at `rootpath`
# (the original only applied it to small_matrix), so the data directory
# can be relocated by changing one variable.
print("Loading small matrix...")
small_matrix = pd.read_csv(rootpath + "data/small_matrix.csv")

print("Loading user features...")
user_features = pd.read_csv(rootpath + "data/user_features.csv")

print("Loading items' daily features...")
item_daily_features = pd.read_csv(rootpath + "data/item_daily_features.csv")

print("All data loaded.")

# Drop interaction rows with any missing field before feature assembly.
small_matrix.dropna(inplace=True)

# 'video_type' is intentionally excluded from the model inputs for now.
model_features = ['user_id', 'video_id', 'video_duration', 'upload_type', 'show_cnt', 'show_user_num',
                  'complete_play_cnt', 'long_time_play_cnt', 'like_cnt']
dense_features = ['video_duration', 'show_cnt', 'show_user_num', 'complete_play_cnt', 'long_time_play_cnt', 'like_cnt']
cat_feats = ['user_id', 'video_id', 'upload_type']

train_matrix = small_matrix[['user_id', 'date', 'video_id', 'video_duration', 'watch_ratio']]
item_feat = item_daily_features[
    ['video_id', 'date', 'video_type', 'upload_type', 'show_cnt', 'show_user_num', 'complete_play_cnt',
     'long_time_play_cnt', 'like_cnt']].copy()
# Shift item statistics forward one day: an interaction on day D is joined
# with the item's stats from day D-1, avoiding same-day feature leakage.
item_feat.loc[:, 'date'] = item_feat['date'] + 1

# Join item-side features onto the interaction matrix; cap at 30k rows.
train_features = pd.merge(train_matrix, item_feat, on=['video_id', 'date'], how='left')[:30000]
# FIX: the original mixed an f-string prefix with a positional print arg.
print("train_features shape =", train_features.shape)

# Free the raw tables — only `train_features` is needed from here on.
del small_matrix
del user_features
del item_daily_features
del item_feat

# Simple integer encoding for the categorical features.
from sklearn.preprocessing import LabelEncoder

# Create the LabelEncoder (reused for both columns; each `fit` resets it).
label_encoder = LabelEncoder()

# Missing-value imputation: categorical -> the literal string "nan",
# dense/numeric -> 0.
train_features[cat_feats] = train_features[cat_feats].fillna("nan")
train_features[dense_features] = train_features[dense_features].fillna(0)

# Category vocabularies.
upload_type = train_features['upload_type'].unique()
video_type = train_features['video_type'].unique()

# Encode. FIX: the original called fit_transform() and discarded its
# return value — `fit` expresses the actual intent with identical effect.
label_encoder.fit(upload_type)
train_features['upload_type'] = label_encoder.transform(train_features['upload_type'])

label_encoder.fit(video_type)
train_features['video_type'] = label_encoder.transform(train_features['video_type'])

# Label rule: watching more than 80% of the video counts as a positive.
train_features['label'] = train_features['watch_ratio'].apply(lambda r: 1 if r > 0.8 else 0)

# Temporal split: everything before 2020-08-25 trains, the rest evaluates.
train_set = train_features[train_features['date'] < 20200825].copy()
eval_set = train_features[train_features['date'] >= 20200825].copy()

batch_size = 8

# Input pipelines as tf.data datasets. Training batches are fixed-size
# (drop_remainder=True) with a small shuffle buffer; evaluation keeps the
# final partial batch.
train_dataset = (
    tf.data.Dataset
    .from_tensor_slices((train_set[model_features], train_set['label']))
    .shuffle(buffer_size=batch_size * 3)
    .batch(batch_size, drop_remainder=True)
)

eval_dataset = (
    tf.data.Dataset
    .from_tensor_slices((eval_set[model_features], eval_set['label']))
    .batch(batch_size, drop_remainder=False)
)

# TF1-style one-shot iterator feeding the training graph; labels are
# reshaped to a column vector and cast to float64 for the loss.
train_iter = train_dataset.make_one_shot_iterator()
train_feat, train_label = train_iter.get_next()
train_label = tf.cast(tf.reshape(train_label, [batch_size, 1]), tf.float64)

# Build the model and the training graph.
nfm_model = NNModel()
pred = nfm_model.forward(train_feat)
diy_loss = log_loss(pred, train_label)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)

# Keep the (grad, var) pairs so the training loop can inspect gradients;
# compute_gradients + apply_gradients is equivalent to minimize() here.
grads_and_vars = optimizer.compute_gradients(diy_loss, tf.trainable_variables())
# NOTE(review): despite the name, these are the tf.Variable objects
# themselves, not name strings (kept for compatibility with the loop below).
_, var_names = zip(*grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars)

# Build the evaluation graph. NOTE(review): presumably forward() reuses the
# trained weights on this second call — verify NNModel's variable handling.
eval_iter = eval_dataset.make_one_shot_iterator()
eval_feat, eval_label = eval_iter.get_next()
# FIX: the eval dataset keeps its final partial batch (drop_remainder=False),
# so reshape with -1 rather than the fixed batch_size the original used.
eval_label = tf.reshape(eval_label, [-1, 1])
pred_res_eval = nfm_model.forward(eval_feat)

# Train.
training_steps = 3
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_list = []
    try:
        for step in range(training_steps):
            # FIX: the original issued separate sess.run calls for train_op,
            # pred, train_label, diy_loss and the gradients. Each sess.run
            # touching the one-shot iterator pulls a FRESH batch, so the
            # printed diagnostics described different batches than the one
            # actually trained on. A single run fetches everything for the
            # same batch.
            _, gradients, pred_val, label_val, loss_now = sess.run(
                [train_op, grads_and_vars, pred, train_label, diy_loss])
            print('predict_score:', pred_val[:10])
            print('label:', label_val[:10])
            loss_list.append(loss_now)
            print(f"step = {step}, loss = {loss_now}")
            for var_name, (grad, tf_var) in zip(var_names, gradients):
                print(f"Variable:{var_name} \nvalue = {tf_var} \nGradient {grad}")
    except tf.errors.OutOfRangeError:
        print("End of dataset")

    # Evaluate: run up to 100 eval batches. FIX: stop cleanly when the eval
    # iterator is exhausted — the original raised an uncaught
    # OutOfRangeError when fewer than 100 batches were available. Also
    # removed a dead `nfm_model.forward(train_feat)` call that rebuilt
    # graph nodes after training without using the result.
    pred_eval = []
    try:
        for _ in range(100):
            pred_eval.extend(sess.run(pred_res_eval))
    except tf.errors.OutOfRangeError:
        pass
    print("sample num =", len(pred_eval))


# from sklearn.metrics import roc_auc_score
# eval_auc = roc_auc_score(eval_set['label'][:len(pred_eval)], pred_eval)
# print("正样本率 =", eval_set['label'][:len(pred_eval)].mean())
# print("eval_auc=", eval_auc)
#
# import matplotlib.pyplot as plt
# step = np.linspace(1, len(loss_list), len(loss_list))
# plt.plot(step, loss_list)
# plt.show()
