import tensorflow as tf
import numpy as np
import os
import dnn_net
import matplotlib.pyplot as plt
import pre_handle_data as p_data
# Directory and filename for periodic model checkpoints.
PATH_MODEL_SAVE="model/"
NAME_MODEL="dnn.ckpt"
# Upper bound on training iterations (loop runs 1..100000 inclusive).
STEP_NUM_TRAIN=100001
# NOTE(review): despite the name, this value is passed to
# GradientDescentOptimizer as the LEARNING RATE below, not a
# regularization coefficient — consider renaming if no other
# module imports this constant.
RATE_REGULARIZATION=0.05
# Number of samples drawn (with replacement) per training step.
SIZE_BATCH=100

if __name__ == '__main__':
    # Load the preprocessed training inputs and labels.
    # NOTE(review): assumed to be numpy arrays of equal length — verify
    # against pre_handle_data.get_set_and_label().
    x_train, y_train = p_data.get_set_and_label()

    # Placeholders for one mini-batch of inputs and expected outputs.
    feed_data = tf.placeholder(tf.float32, [None, dnn_net.INPUT_NODE], name="feed_data")
    ex_output = tf.placeholder(tf.float32, [None, dnn_net.OUTPUT_NODE], name="ex_output")
    output = dnn_net.get_dnn_net(feed_data)

    # NOTE(review): this is a mean-squared-error loss, not cross-entropy;
    # the original name is kept so the summary/graph stay identical.
    cross_entropy = tf.reduce_mean(tf.square(output - ex_output))

    # NOTE(review): RATE_REGULARIZATION is used as the learning rate here.
    train_step = tf.train.GradientDescentOptimizer(RATE_REGULARIZATION).minimize(cross_entropy)

    tf.summary.scalar("loss", cross_entropy)
    merged_summary = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter("logs/", sess.graph)
        sess.run(init)
        for i in range(1, STEP_NUM_TRAIN):
            # Sample a random mini-batch (with replacement).
            rand_index = np.random.choice(len(x_train), size=SIZE_BATCH)
            xs = x_train[rand_index]
            ys = y_train[rand_index]
            _, loss, summary = sess.run(
                [train_step, cross_entropy, merged_summary],
                feed_dict={feed_data: xs, ex_output: ys})
            if loss < 0.0001:
                # Fix: checkpoint the converged model before early-stopping;
                # the original broke out without saving, losing the best model.
                saver.save(sess, os.path.join(PATH_MODEL_SAVE, NAME_MODEL), global_step=i)
                break
            if i % 10 == 0:
                saver.save(sess, os.path.join(PATH_MODEL_SAVE, NAME_MODEL), global_step=i)
                writer.add_summary(summary, i)
                # Renamed from `str` (shadowed the builtin); fixed "epcho" typo.
                msg = "epoch step %d,cross_entropy is %f"
                print(msg % (i, loss))
        # Fix: close the summary writer so buffered events are flushed to disk.
        writer.close()







