import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

# Clear any graph left over from a previous run (TF 1.x static-graph API),
# so re-executing this script does not accumulate duplicate ops/variables.
tf.reset_default_graph()

def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully connected layer and return its output tensor.

    Args:
        inputs: input tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation applied to the affine
            output; the layer is linear when None.

    Returns:
        Output tensor of shape [batch, out_size].
    """
    # Random-normal weight init; biases start at a small positive 0.1.
    w = tf.Variable(tf.random_normal([in_size, out_size]))
    b = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # L2 weight decay (0.0008 picked by tuning; this data set prefers a
    # small coefficient).  Stash the penalty in the 'losses' collection so
    # the total loss can sum all layers' penalties later.
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.0008)(w))
    affine = tf.matmul(inputs, w) + b
    return affine if activation_function is None else activation_function(affine)

# ----- network graph: 100 -> 50 -> 30 -> 22 -> 11 tanh hidden layers -----
x = tf.placeholder(tf.float32, [None, 100])

hidden = []
prev = x
for n_in, n_out in [(100, 50), (50, 30), (30, 22), (22, 11)]:
    prev = add_layer(prev, n_in, n_out, activation_function=tf.nn.tanh)
    hidden.append(prev)
l1, l2, l3, l4 = hidden
# Linear output layer: raw logits; softmax happens inside the loss op below.
y = add_layer(l4, 11, 11, activation_function=None)

# Ground-truth one-hot labels (11 classes).
y_ = tf.placeholder(tf.float32, [None, 11])

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
tf.add_to_collection('losses', cross_entropy)
# Total loss = cross entropy + every L2 penalty collected in add_layer.
# (Without the regularizer the accuracy did not reach 98%.)
loss = tf.add_n(tf.get_collection('losses'))

# Plain SGD; learning rate 0.38 chosen by tuning.
train_step = tf.train.GradientDescentOptimizer(0.38).minimize(loss)

sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)

# Alternative feature files kept for reference:
# lsi_matrix = np.loadtxt(open("../model/all_lsi.csv","rb"),delimiter=",",skiprows=0)
# lsi_matrix = np.loadtxt(open("../model/keyword_word2vec.csv", "rb"), delimiter=",", skiprows=0)
lsi_matrix = np.load('../model/doc_vectors_2row.npy')
df_train = pd.read_csv('../data/train_processed.csv')

# Labels are 1..11 (mapped to one-hot index label-1 below).  Align the
# feature rows to the number of labelled examples instead of the previous
# hard-coded 4774, so a changed training set doesn't silently misalign.
Y = df_train['label'].values
rows_count = len(Y)
X = lsi_matrix[:rows_count, :]
# Vectorized one-hot encoding: row i gets a 1 at column Y[i]-1.
Y_onehot = np.eye(11, dtype=float)[Y - 1]

X_train, X_val, y_train, y_val = train_test_split(X, Y_onehot, test_size=0.20, random_state=4)

# Train: mini-batch SGD with per-epoch validation accuracy.
# Build the accuracy ops ONCE, outside the loop — the original created new
# graph nodes every epoch, leaking graph memory over 1000 epochs.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

batch_size = 477
n_train = X_train.shape[0]
for _ in range(1000):
    # Iterate over the actual training rows.  The original fixed 10 batches
    # of 477 (4770 rows), which over-ran the post-split training set
    # (~80% of the data) and fed empty slices to the optimizer.
    for start in range(0, n_train, batch_size):
        batch_xs = X_train[start:start + batch_size, :]
        batch_ys = y_train[start:start + batch_size, :]
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    print(sess.run(accuracy, feed_dict={x: X_val, y_: y_val}))