# coding=utf8

import os
import tensorflow as tf

import practice.data.loadles2data as pdg

# get lex
# Vocabulary (lexicon) produced by the data loader; one input unit per entry.
lex = pdg.getlex()

n_input_layer = len(lex)  # input layer width = vocabulary size

# Load the train/test split as fixed-width vectors sized to the lexicon.
dat_sets = pdg.load_vec(n_input_layer)

print ('load over')

# NOTE(review): presumably materializes the test features/labels before
# evaluation — confirm against loadles2data.
dat_sets.test.get_x_labels()

# (removed a redundant second `n_input_layer = len(lex)` — lex is unchanged)
n_output_layer = 3  # three target classes

# Word-id input (indices into the lexicon) and one-hot class labels.
x = tf.placeholder(tf.int32, shape=[None, n_input_layer])
y_ = tf.placeholder(tf.float32, shape=[None, n_output_layer])

# Dropout keep-probability: fed as 1.0 for evaluation, 0.5 during training.
p_keep_hidden = tf.placeholder(tf.float32)

batch_size = 50


def weight_variable(shape):
    """Return a trainable weight tensor initialized from a truncated normal."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def bias_variable(shape):
    """Return a trainable bias tensor filled with a small positive constant."""
    return tf.Variable(tf.constant(0.1, shape=shape))


def neural_network(embedding_size=128, filter_sizes=(3, 4, 5), num_filters=128):
    """Build a Kim-style text CNN over the global input placeholder ``x``.

    Architecture: embedding lookup -> parallel conv + max-pool branches
    (one branch per filter size) -> concat -> dropout -> linear output layer.

    Args:
        embedding_size: dimensionality of the learned word embeddings.
        filter_sizes: window heights (in tokens) of the parallel conv branches.
        num_filters: number of feature maps per branch.

    Returns:
        Unscaled logits tensor of shape [batch, n_output_layer].
    """
    # Embedding layer; pinned to CPU (embedding_lookup lacked an efficient
    # GPU kernel in older TF releases).
    with tf.device('/cpu:0'), tf.name_scope("embedding"):
        W = weight_variable([n_input_layer, embedding_size])
        embedded_chars = tf.nn.embedding_lookup(W, x)
        # Add a trailing channel dim so conv2d sees [batch, seq, emb, 1].
        embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)

    # One convolution + max-pool branch per filter size.
    pooled_outputs = []
    for filter_size in filter_sizes:
        with tf.name_scope("conv-maxpool-%s" % filter_size):
            filter_shape = [filter_size, embedding_size, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_filters]))
            conv = tf.nn.conv2d(embedded_chars_expanded, W,
                                strides=[1, 1, 1, 1], padding="VALID")
            h = tf.nn.relu(tf.nn.bias_add(conv, b))
            # Max-pool over the entire valid sequence axis -> one scalar
            # per filter map.
            pooled = tf.nn.max_pool(
                h,
                ksize=[1, n_input_layer - filter_size + 1, 1, 1],
                strides=[1, 1, 1, 1],
                padding='VALID')
            pooled_outputs.append(pooled)

    # Concatenate every branch and flatten to [batch, num_filters_total].
    num_filters_total = num_filters * len(filter_sizes)
    h_pool = tf.concat(axis=3, values=pooled_outputs)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

    # Dropout; keep-probability arrives via the p_keep_hidden placeholder.
    with tf.name_scope("dropout"):
        h_drop = tf.nn.dropout(h_pool_flat, p_keep_hidden)

    # Final linear layer producing the class logits.
    with tf.name_scope("output"):
        # NOTE(review): tf.get_variable ignores name_scope, so this variable
        # is created as "W" in the enclosing variable scope — fine for a
        # single graph build, but would collide if the network were built twice.
        W = tf.get_variable("W", shape=[num_filters_total, n_output_layer],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.constant(0.1, shape=[n_output_layer]))
        output = tf.nn.xw_plus_b(h_drop, W, b)

    return output

# Checkpoint directory used by the training loop below.
modeldir = '/Users/vista/PycharmProjects/data/model/'

# Logits for the whole graph.
py_x = neural_network()

# Mean softmax cross-entropy between logits and one-hot labels.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=y_))

# Adam with an explicit compute/apply split (rather than minimize()).
optimizer = tf.train.AdamOptimizer(0.01)
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars)

# Fraction of examples whose arg-max prediction matches the label.
correct_prediction = tf.equal(tf.argmax(py_x, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

def train_neural_network():
    """Train the text CNN, periodically evaluating and checkpointing.

    Restores an existing checkpoint when one is present, then runs 32000
    mini-batch steps: test accuracy is printed every 10 steps, the model is
    saved every 100 steps and once more after the final step.
    """
    saver = tf.train.Saver(tf.global_variables())
    ckpt_path = modeldir + 'les2cnn.model.ckpt'  # single source for the path

    pre_accuracy = 0  # best test accuracy observed so far

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        # Resume from an earlier run if a checkpoint exists on disk.
        if os.path.exists(ckpt_path + '.index'):
            saver.restore(sess, ckpt_path)

        for i in range(32000):
            if i % 10 == 0:
                # Evaluate on the full test set with dropout disabled.
                test_accuracy = accuracy.eval(
                    feed_dict={x: dat_sets.test.x,
                               y_: dat_sets.test.labels,
                               p_keep_hidden: 1.0})
                print (test_accuracy)
                if test_accuracy > pre_accuracy:
                    print (str(pre_accuracy) + '->' + str(test_accuracy))
                    pre_accuracy = test_accuracy
            # Periodic checkpoint; skip step 0 since nothing has trained yet.
            if i % 100 == 0 and i > 0:
                saver.save(sess, ckpt_path)
            # FIX: use the module-level batch_size constant instead of a
            # hard-coded 50, so the two stay in sync.
            batch = dat_sets.train.next_batch(batch_size)
            train_op.run(feed_dict={x: batch[0], y_: batch[1],
                                    p_keep_hidden: 0.5})
        saver.save(sess, ckpt_path)


# Guard the entry point so importing this module does not start training.
if __name__ == '__main__':
    train_neural_network()