import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from sklearn.model_selection import train_test_split

tf.disable_v2_behavior()

# Load the dating dataset: three numeric feature columns plus an integer class label,
# tab-separated with no header row.
data = pd.read_table('datingTestSet2.txt', header=None, names=['param1', 'param2', 'param3', 'type'],
                     encoding='utf-8', sep='\t')
categorical_columns = ['param1', 'param2', 'param3']
# for f in categorical_columns:
#     data[f] = data[f].map(dict(zip(data[f].unique(), range(0, data[f].nunique()))))
# shuffle(data)
target = data['type']
del data['type']
print('Data Shape', data.shape)
print('Target Shape', target.shape)

# data: [1000, 3]
# target: [1000, 1]
# Reshape data to [batch_size, sequence_length, input_dimension]; the trailing axis
# plays the role of the per-step embedding dimension in an NLP task — here it is 1,
# since each "time step" is a single scalar feature.
# target is one-hot encoded to [batch_size, output_dimension] via pd.get_dummies.
train_input, train_output = np.expand_dims(np.array(data), axis=-1), pd.get_dummies(target)
print('train_input')
print(train_input)
print('train_output')
print(train_output)
# Hold out the last 20% of samples as the test set (test_size=0.2, i.e. an 80/20
# split — the original comment claiming a 60% training split was stale).
# shuffle=False keeps the original file order, so sklearn ignores random_state
# entirely; the dead random_state argument is omitted.
# NOTE(review): with shuffle=False the split is sequential — if the source file is
# ordered by class, the test set will be biased. Confirm the file ordering.
train_input, test_input, train_output, test_output = train_test_split(train_input, train_output, shuffle=False,
                                                                      test_size=0.2)

# Placeholders: data has shape [batch_size, sequence_length=3, input_dimension=1];
# target holds the one-hot labels over 3 classes.
data = tf.placeholder(tf.float32, [None, 3, 1])
target = tf.placeholder(tf.float32, [None, 3])

# LSTM hidden size — too large risks overfitting, too small underfits; tune and observe.
num_units = 31
cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)

# Transpose outputs to [sequence_length, batch_size, num_units] and take the
# output of the final time step as the fixed-size sequence representation.
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)

# Output projection: weight is [num_units, num_classes], bias is [num_classes].
weight = tf.Variable(tf.truncated_normal([num_units, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))

# Class probabilities via softmax over the projected final LSTM state.
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)

# Cross-entropy loss; predictions are clipped away from 0 so tf.log never yields -inf.
cross_entropy = -tf.reduce_mean(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)

# Error metric: fraction of samples whose argmax prediction differs from the label.
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))

# Graph is complete — create a session and initialize all variables.
# tf.global_variables_initializer() is the supported replacement for the
# long-deprecated tf.initialize_all_variables().
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

# BUG FIX: batch_size was 10000, larger than the entire training set (~800 rows),
# so no_of_batchs evaluated to 0 and the inner training loop NEVER ran — the model
# was being evaluated with its random initial weights. Use a batch size smaller
# than the dataset, and guard with max(1, ...) so at least one batch always runs.
batch_size = 100
no_of_batchs = max(1, int(len(train_input)) // batch_size)
epoch = 1000

for i in range(epoch):
    ptr = 0
    for j in range(no_of_batchs):
        # Slice out the next mini-batch and take one optimizer step on it.
        inp, out = train_input[ptr:ptr + batch_size], train_output[ptr:ptr + batch_size]
        ptr += batch_size
        sess.run(minimize, {data: inp, target: out})
    if i % 10 == 0:
        # Report full-training-set loss every 10 epochs.
        loss = sess.run(cross_entropy, feed_dict={data: train_input, target: train_output})
        print("epoch: %d, loss: %f" % (i, loss))

# Final evaluation: misclassification rate on held-out and training data.
incorrect = sess.run(error, {data: test_input, target: test_output})
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))

incorrect = sess.run(error, {data: train_input, target: train_output})
print('Epoch {:2d} train data error {:3.1f}%'.format(i + 1, 100 * incorrect))

# print(sess.run(prediction, {data: [[[0.],
#                                     [0.],
#                                     [487.],
#                                     [475.506],
#                                     [103.],
#                                     [82.412],
#                                     [26.183],
#                                     [7.677]]]}))
