# coding=utf8

from __future__ import print_function

import os
import tensorflow as tf
import practice.data.getles2data as pdg

# Build the lexicon (vocabulary) via the data helper.
lex = pdg.getlex()

n_input_layer = len(lex)  # input layer width = lexicon size

# Load train & test sets, vectorized to the input dimension.
dat_sets = pdg.load_vec(n_input_layer)

print ('load over')

# NOTE(review): called for its side effect only; the return value is
# discarded — confirm this is intentional.
dat_sets.test.get_x_labels()

n_layer_1 = 512  # hidden layer 1 width
n_layer_2 = 512  # hidden layer 2 width ("hidden" simply means any layer between input and output)

n_output_layer = 3  # output layer width (3 classes, one-hot)

def neural_network(x, layer_1_w_b, layer_2_w_b, layer_output_w_b):
    """Build a 3-layer fully connected network and return its logits.

    Two hidden layers apply w·x + b followed by ReLU; the output layer is
    linear (no softmax here — the softmax is folded into the loss op).

    Each *_w_b argument is a dict with keys 'w_' (weight matrix) and
    'b_' (bias vector).
    """
    hidden = x
    # Feed the activation through each hidden layer in turn.
    for params in (layer_1_w_b, layer_2_w_b):
        hidden = tf.nn.relu(tf.add(tf.matmul(hidden, params['w_']), params['b_']))

    # Linear output layer: raw logits for the downstream softmax loss.
    return tf.add(tf.matmul(hidden, layer_output_w_b['w_']), layer_output_w_b['b_'])


# 定义第一层"神经元"的权重和biases
layer_1_w_b = {'w_': tf.Variable(tf.random_normal([n_input_layer, n_layer_1])),
               'b_': tf.Variable(tf.random_normal([n_layer_1]))}
# 定义第二层"神经元"的权重和biases
layer_2_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_1, n_layer_2])),
               'b_': tf.Variable(tf.random_normal([n_layer_2]))}
# 定义输出层"神经元"的权重和biases
layer_output_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_2, n_output_layer])),
                    'b_': tf.Variable(tf.random_normal([n_output_layer]))}


x = tf.placeholder("float", shape=[None, n_input_layer])
y_ = tf.placeholder("float", shape=[None, n_output_layer ])

#p_keep_hidden = tf_vista.placeholder('float')

py_x = neural_network( x ,layer_1_w_b , layer_2_w_b, layer_output_w_b )#, p_keep_hidden)

cost_func = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x,labels=y_))

#cost_func = -tf_vista.reduce_sum(y_ * tf_vista.log(py_x))

optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost_func)

accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(py_x, 1), tf.argmax(y_, 1)), "float"))


modeldir = '/Users/vista/PycharmProjects/data/model/'

with tf.Session() as session:

    session.run(tf.global_variables_initializer())

    saver = tf.train.Saver()

    # Best test accuracy seen so far — used only for progress reporting,
    # not for deciding when to checkpoint.
    pre_accuracy = 0

    # Resume from an existing checkpoint if one is present on disk.
    if os.path.exists(modeldir+'les2.model.ckpt.index'):
        saver.restore(session,modeldir+'les2.model.ckpt')

    for i in range(60000):
        # Every 50 steps: evaluate on the full test set and report improvements.
        if i % 50 == 0 :
            test_accuracy = accuracy.eval(feed_dict={x: dat_sets.test.x, y_: dat_sets.test.labels})#, p_keep_hidden: 1.0})
            print (test_accuracy)
            if test_accuracy > pre_accuracy:
                print (str(pre_accuracy)+'->'+str(test_accuracy))
                pre_accuracy = test_accuracy
        # Every 500 steps (skipping step 0): checkpoint the model,
        # regardless of whether accuracy improved.
        if i % 500 == 0 and i > 0:
            saver.save(session,modeldir+'les2.model.ckpt')
        # One SGD step on a mini-batch of 50 examples.
        batch = dat_sets.train.next_batch(50)
        optimizer.run(feed_dict={x: batch[0], y_: batch[1]})#, p_keep_hidden: 1.0})

    # file_writer = tf_vista.summary.FileWriter('/Users/vista/PycharmProjects/logs', sess.graph)

    # Final test-set accuracy after the full training run.
    print(accuracy.eval(feed_dict={x: dat_sets.test.x, y_: dat_sets.test.labels}))#,  p_keep_hidden: 1.0}))
