#coding:utf-8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Model hyper-parameters.
INPUT_NODE = 299 * 299 * 3   # flattened input size: 299x299 RGB image
                             # (was 299*299, which omitted the 3 colour channels
                             # even though the decoder reshapes to [299, 299, 3])
OUTPUT_NODE = 1              # NOTE(review): with sparse softmax loss this should
                             # equal the number of label classes; 1 logit makes the
                             # softmax constant — confirm the real class count
LAYER1_NODE = 500            # width of the single hidden layer
REGULARAZTION_RATE = 0.0001  # L2 regularisation strength (name kept as-is;
                             # "regularization" is the intended spelling)
TRAINING_STEPS = 500         # number of training iterations
learning_rate = 0.01         # SGD step size


def inference(input_tensor, weights1, biases1, weights2, biases2):
    """Forward pass of a two-layer fully connected network.

    Applies a ReLU-activated hidden layer followed by a linear output
    layer, and returns the raw (pre-softmax) logits.
    """
    hidden = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
    logits = tf.matmul(hidden, weights2) + biases2
    return logits


# TF1 queue-based input pipeline: read serialized examples from the TFRecord
# shard, decode them, and assemble shuffled training batches.
# NOTE(review): the pattern names only shard 00000 of 00022 — if all 22 shards
# are meant to be consumed, a wildcard pattern is presumably needed; confirm.
files = tf.train.match_filenames_once("E:/data.tfrecords-00000-of-00022")
filename_queue = tf.train.string_input_producer(files,shuffle=False)
reader = tf.TFRecordReader()
_,serialized_example = reader.read(filename_queue)
# Each serialized example carries an int64 label and the raw image bytes.
features = tf.parse_single_example(
    serialized_example,
    features = {
        'label':tf.FixedLenFeature([],tf.int64),
        'image_raw':tf.FixedLenFeature([],tf.string),
    })
labels = tf.cast(features['label'],tf.int32)
decoded_images = tf.decode_raw(features['image_raw'],tf.uint8)

# Reshape the flat byte tensor into an image and rescale uint8 -> float32
# in [0, 1].  Assumes image_raw holds exactly 299*299*3 bytes — TODO confirm
# against the writer that produced the TFRecords.
image_1 = tf.reshape(decoded_images, [299,299,3])  # reshape into an image matrix
image_1 = tf.image.convert_image_dtype(image_1,dtype=tf.float32)
min_after_dequeue = 100
batch_size = 10
# Recommended sizing: capacity = min_after_dequeue + (threads + margin) * batch_size.
capacity = min_after_dequeue + 3 * batch_size
# Produces image_batch [batch, 299, 299, 3] and label_batch [batch].
image_batch,label_batch = tf.train.shuffle_batch([image_1,labels],batch_size=batch_size,
                                                 capacity=capacity,min_after_dequeue=min_after_dequeue)



# The queue yields images as [batch, 299, 299, 3]; tf.matmul needs a 2-D
# input, so flatten each image to a single row vector first.  The flattened
# size is 299*299*3 (the INPUT_NODE constant above omits the 3 channels —
# NOTE(review): reconcile INPUT_NODE with this value).
flat_size = 299 * 299 * 3
flat_images = tf.reshape(image_batch, [batch_size, flat_size])

# Layer parameters: one ReLU hidden layer, one linear output layer.
weights1 = tf.Variable(tf.truncated_normal([flat_size, LAYER1_NODE], stddev=0.1))
biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

# Forward pass: logits for every image in the batch.
y = inference(flat_images, weights1, biases1, weights2, biases2)

# Cross-entropy averaged over the batch.
# NOTE(review): sparse_softmax_cross_entropy_with_logits needs one logit per
# class; with OUTPUT_NODE == 1 the softmax is constant and only label 0 is
# valid, so training is degenerate — OUTPUT_NODE should be the number of
# classes (confirm against the data).
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=label_batch)
cross_entropy_mean = tf.reduce_mean(cross_entropy)

# Total loss = data term + L2 penalty on both weight matrices.
regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
regularaztion = regularizer(weights1) + regularizer(weights2)
loss = cross_entropy_mean + regularaztion

# Plain SGD; use the learning_rate constant instead of a duplicated literal.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    # match_filenames_once stores the matched file list in a *local* variable,
    # so both initializers must actually run.  The original code passed
    # tf.local_variables_initializer() as sess.run's second (feed_dict)
    # argument, which meant it was never executed; run both ops as fetches.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])

    # Start the queue-runner threads that feed the shuffle_batch pipeline.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Training loop: fetch the loss in the same run() call as the train step
    # so the reported loss corresponds to the batch just trained on (two
    # separate run() calls would consume two different batches).
    for i in range(TRAINING_STEPS):
        _, loss_value = sess.run([train_step, loss])
        if i % 100 == 0:
            print("After %d training step(s), loss is %g " % (i, loss_value))

    # Shut the reader threads down cleanly before the session closes.
    coord.request_stop()
    coord.join(threads)

