#!/usr/bin/env python
# coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

# import tianchi_fresh.utils

'''
思路：
框架：循环神经网络
1、数据处理，划分训练集，测试集，（验证集）
batch: 3、5、7天，预测下一天

 *** 将 "item_id" 项放在最后
 * 双12处理：去除双十二，用13号数据填充

2、构建网络

3、最小化误差，使用梯度下降算法

4、结果评判，使用f1,精确度(precision)、召回率(recall)和F1值作为评估指标
from sklearn.metrics import precision_recall_curve, roc_curve, auc

TP: true positive, ++
FN: false negative, +-
FP: false positive, -+
TN: true negative, --
precision = TP / (TP + FP)
recall = TP / (TP + FN)
F1 = 2 * precision * recall / (precision + recall)
'''
'''
# y_true, y_pred
# TP = (y_pred==1)*(y_true==1)
# FP = (y_pred==1)*(y_true==0)
# FN = (y_pred==0)*(y_true==1)
# TN = (y_pred==0)*(y_true==0)
# TP + FP = y_pred==1
# TP + FN = y_true==1
'''
def precision_score(y_true, y_pred):
    """Precision = TP / (TP + FP) for binary 0/1 numpy arrays.

    Fixes two defects of the original one-liner:
    - integer division: both sums are numpy integers, so under Python 2
      the ratio was floored to 0 or 1;
    - division by zero when there are no positive predictions (returns
      0.0 instead of raising / producing nan).
    """
    tp = float(((y_true == 1) & (y_pred == 1)).sum())
    predicted_positive = float((y_pred == 1).sum())
    if predicted_positive == 0:
        return 0.0
    return tp / predicted_positive
def recall_score(y_true, y_pred):
    """Recall = TP / (TP + FN) for binary 0/1 numpy arrays.

    Fixes two defects of the original one-liner:
    - integer division: both sums are numpy integers, so under Python 2
      the ratio was floored to 0 or 1;
    - division by zero when there are no positive ground-truth labels
      (returns 0.0 instead of raising / producing nan).
    """
    tp = float(((y_true == 1) & (y_pred == 1)).sum())
    actual_positive = float((y_true == 1).sum())
    if actual_positive == 0:
        return 0.0
    return tp / actual_positive
def f1_score(y_true, y_pred):
    """F1 = harmonic mean of precision and recall.

    The original computed precision_score and recall_score twice each
    (four passes over the data) and raised ZeroDivisionError when both
    were zero; this computes each once and returns 0.0 in that case.
    """
    p = precision_score(y_true, y_pred)
    r = recall_score(y_true, y_pred)
    if p + r == 0:
        return 0.0
    return 2.0 * p * r / (p + r)

# from sklearn.metrics import precision_recall_curve
import tensorflow as tf
import pandas as pd
import numpy as np

# --- hyperparameters / configuration ---
lr = 0.001  # learning rate for the Adam optimizer
train_iters = 30  # total number of training rows to consume (loop runs while step*batch_size < this)
batch_size = 3 # 3, 5, 7  -- days per batch window (see the planning notes above)
n_inputs = 43  # one-hot feature width produced by process_data()
n_steps = 1  # time steps per RNN sample
n_hidden_units = 64 #hidden layer size of the LSTM cell
n_classes = 24 #4758484 # outputs; 24 item_id one-hot labels in the sample data (full dataset presumably 4,758,484 items -- TODO confirm)
data_dir = "../data/tianchi/fresh_comp_offline/"  # location of the raw competition CSVs

batch_original = batch_size  # NOTE(review): saved copy of the batch size; never read again in this file -- confirm before removing

def data_processing(data_dir):
    """Load the Tianchi item/user CSVs from *data_dir* and return
    (shuffled user rows, item labels, user labels).

    Parameters
    ----------
    data_dir : str
        Directory prefix (with trailing separator) containing
        ``tianchi_fresh_comp_train_item.csv`` and
        ``tianchi_fresh_comp_train_user.csv``.

    Returns
    -------
    (DataFrame, Series, Series)
        Shuffled user rows, the ``item_id`` column of the item table,
        and the ``item_id`` column of the (shuffled) user table.
    """
    data_dir_train_item = data_dir + "tianchi_fresh_comp_train_item.csv"
    data_dir_train_user = data_dir + "tianchi_fresh_comp_train_user.csv"

    train_items = pd.read_csv(data_dir_train_item)
    train_users = pd.read_csv(data_dir_train_user)

    # Bug fix: np.random.shuffle() cannot shuffle a DataFrame in place
    # (it swaps via column-oriented __getitem__/__setitem__); sample(frac=1)
    # is pandas' supported way to shuffle rows.
    train_users = train_users.sample(frac=1).reset_index(drop=True)

    train_items_labels = train_items['item_id']
    # Labels are taken AFTER shuffling so they stay aligned with train_users.
    train_users_labels = train_users['item_id']
    return train_users, train_items_labels, train_users_labels

def process_data(data_dir = "./data_item.csv"):
    """Reorder the raw event rows chronologically and one-hot encode them.

    Parameters
    ----------
    data_dir : str
        Path to a CSV with at least the columns ``time``, ``user_id``,
        ``item_id``, ``behavior_type`` and ``item_category``.

    Returns
    -------
    (DataFrame, DataFrame)
        ``new_data``: one-hot encoded features (n x 43 for the sample file),
        ``new_data_label``: one-hot encoded ``item_id`` labels (n x 24).
    """
    data = pd.read_csv(data_dir)
    dates = pd.to_datetime(data['time'])
    # (Removed dead statement: the original called data.set_index(dates)
    # and discarded the result -- set_index is not in-place.)
    new_data = pd.DataFrame(data,
                            columns=['user_id', 'item_id', 'behavior_type', 'item_category'])
    new_data = new_data.set_index(dates, drop=True)
    new_data = new_data.sort_index()

    # get_dummies only encodes object columns, so force every cell to str.
    new_data = new_data.applymap(str)
    new_data_label = pd.DataFrame(new_data, columns=['item_id'])

    # one-hot encode features and labels
    new_data = pd.get_dummies(new_data)
    new_data_label = pd.get_dummies(new_data_label)
    return new_data, new_data_label

def next_batch(items, labels, batch_size, start=0):
    """Return the aligned slice [start, start + batch_size) of items and labels."""
    end = start + batch_size
    return items[start:end], labels[start:end]

# train_items,train_items_labels, train_users, train_users_labels = utils(data_dir)
# Load one-hot features (n x 43) and labels (n x 24) from the local sample CSV.
# NOTE: this runs at import time and reads "./data_item.csv" from disk.
train_users, train_users_labels = process_data()

# Graph inputs: feature sequences and one-hot targets (batch dim left open).
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])

weights = {
    # input projection: n_inputs -> n_hidden_units
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),

    # output projection: n_hidden_units -> n_classes
    'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))

}

biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),

    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))

}

def RNN(X, weights, biases):
    """Single-layer LSTM classifier.

    Projects the inputs through the 'in' layer, runs one BasicLSTMCell
    over n_steps, then projects every step's hidden state through the
    'out' layer.

    X: [batch, n_steps, n_inputs] tensor.
    Returns logits of shape [batch * n_steps, n_classes].
    """
    # flatten time into the batch dim so a single matmul applies the input layer
    X = tf.reshape(X, [-1, n_inputs])
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # restore the [batch, time, hidden] layout expected by dynamic_rnn
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])


    cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)

    # NOTE(review): zero_state is built from the global batch_size, so every
    # feed must contain exactly batch_size rows even though x allows None.
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    with tf.variable_scope('rnn'):
        outputs, final_state = tf.nn.dynamic_rnn(cell, X_in, initial_state=init_state,time_major=False,scope='rnn')

    # outputs = tf.unstack(tf.transpose(outputs, [0, 1, 2]))
    # collapse [batch, time, hidden] -> [batch*time, hidden] for the output layer
    outputs = tf.reshape(outputs, [-1, n_hidden_units])
    results = tf.matmul(outputs, weights['out']) + biases['out']

    return results

# logits of shape [batch * n_steps, n_classes]
pred = RNN(x, weights, biases)
# softmax cross-entropy averaged over the batch
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)

# per-sample 1/0 hit: predicted class vs one-hot target class
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))  # mean of 0/1 hits == accuracy

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    step = 0
    # consume train_iters rows, batch_size rows per step
    while step * batch_size < train_iters:
        start = step * batch_size
        batch_xs, batch_ys = next_batch(train_users, train_users_labels, batch_size, start)
        # Reshape with numpy instead of tf.reshape: the original built a new
        # tf.reshape op (twice) on every loop iteration, growing the graph
        # unboundedly; feed_dict only needs plain ndarrays anyway.
        batch_xs = batch_xs.values.reshape(batch_size, n_steps, n_inputs)
        batch_ys = batch_ys.values
        # run one optimization step
        _, pre = sess.run([train_op, pred], feed_dict={x:batch_xs, y:batch_ys})
        print("item %s, " % step)
        print("pred %s" % pre)
        if step % 1 == 0:
            # The original re-fetched and re-reshaped the exact same batch
            # here; reuse the arrays already prepared above.
            accr = sess.run(accuracy, feed_dict={x:batch_xs, y:batch_ys})
            print("accuracy: %s " % accr)
        step+=1