#coding:utf-8
import os
import tensorflow as tf
import numpy as np
import glob
import inference

# Neural-network hyperparameters

LEARNING_RATE_BASE = 0.005      # base learning rate
LEARNING_RATE_DECAY = 0.99    # decay rate of the learning rate
REGULARIZATION_RATE = 0.0001   # L2 regularization coefficient in the loss
TRAINING_STEPS = 100          # number of training steps
MOVING_AVERAGE_DECAY = 0.99       # decay rate for the exponential moving average

# Path and filename for saving the model checkpoints
MODEL_SAVE_PATH = "e:/test/6-1/"
# NOTE(review): likely a typo for MODEL_NAME; kept as-is because train() references it.
MODE_NAME = "model.ckpt"
# Glob pattern for the TFRecord shards; the suffix after the last '-' is
# parsed by get_num_exaples() as the example count of that shard.
FILE_NAME = "E:/picture/train/data.tfrecords-*"


def get_num_exaples(pattern=None):
    """Return the total example count encoded in the TFRecord shard filenames.

    Each shard filename is expected to end in ``-<count>``; the counts of all
    files matching the glob pattern are summed.

    Args:
        pattern: Optional glob pattern; defaults to the module-level FILE_NAME.

    Returns:
        int: sum of the per-shard example counts.
    """
    if pattern is None:
        pattern = FILE_NAME
    # The text after the last '-' in each matched path is the shard's count.
    return sum(int(path.split('-')[-1]) for path in glob.glob(pattern))


def read_my_file_format(filename_queue):
    """Read and decode one labeled image example from a TFRecord queue.

    Args:
        filename_queue: A queue of TFRecord filenames (from
            tf.train.string_input_producer).

    Returns:
        [image, label]: a float32 image tensor of shape
        (IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS) scaled to [0, 1], and an
        int32 scalar label.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    labels = tf.cast(features['label'], tf.int32)
    decoded_images = tf.decode_raw(features['image_raw'], tf.uint8)
    # Reshape the raw bytes into an image tensor. Use the same size constants
    # as the placeholder in train() (the original hard-coded [299, 299, 3],
    # which must equal these constants for the feed_dict to be valid anyway).
    image_1 = tf.reshape(decoded_images,
                         [inference.IMAGE_SIZE,
                          inference.IMAGE_SIZE,
                          inference.NUM_CHANNELS])
    # Convert to float32 in [0, 1] for the network input.
    image_1 = tf.image.convert_image_dtype(image_1, dtype=tf.float32)
    return [image_1, labels]

#训练模型的过程
def train():
    """Build the input pipeline and training graph, then run the training loop.

    Reads TFRecord shards matching FILE_NAME, trains the model defined by
    inference.inference with SGD and exponential learning-rate decay,
    maintains an exponential moving average of the trainable variables, and
    periodically checkpoints to MODEL_SAVE_PATH.
    """
    # Input pipeline: match the shard files and feed them through a filename queue.
    files = tf.train.match_filenames_once(FILE_NAME)
    filename_queue = tf.train.string_input_producer(files, shuffle=False)
    min_after_dequeue = 1500
    batch_size = 20
    capacity = min_after_dequeue + 3 * batch_size
    # Two parallel readers feeding a shuffling batch join.
    example_list = [read_my_file_format(filename_queue) for _ in range(2)]
    image_batch, label_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size,
        capacity=capacity, min_after_dequeue=min_after_dequeue)

    x = tf.placeholder(tf.float32, [
        batch_size,                # number of examples per batch
        inference.IMAGE_SIZE,      # image height
        inference.IMAGE_SIZE,      # image width
        inference.NUM_CHANNELS],   # image depth (channels)
        name='x-input')

    y_ = tf.placeholder(tf.int32, None, name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # Forward pass under the current parameters (training mode).
    y = inference.inference(x, True, regularizer)

    # Global step: not trainable, hence excluded from the moving average.
    global_step = tf.Variable(0, trainable=False)

    # Maintain an exponential moving average of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_)
    # Mean cross entropy over all examples in the batch.
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # Total loss = cross entropy + L2 terms collected by inference under 'losses'.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate exponentially, roughly once per epoch.
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               get_num_exaples() / batch_size,
                                               LEARNING_RATE_DECAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Group the gradient update and the moving-average update into one op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()
    # Make sure the checkpoint directory exists before the first save
    # (saver.save fails otherwise).
    os.makedirs(MODEL_SAVE_PATH, exist_ok=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(TRAINING_STEPS):
            # Pull a batch from the queue, then feed it through the graph.
            # (Removed per-step debug prints of the raw batch.)
            xs, ys = sess.run([image_batch, label_batch])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 20 == 0:
                # Report loss on the current training batch. The original
                # message concatenated to the garbled "trainingbatch".
                print("After %d training steps, loss on training batch is %g"
                      % (step, loss_value))
                # Checkpoint every 20 steps, suffixed with the global step.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODE_NAME),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads)

def main(argv=None):
    # Entry point invoked by tf.app.run(); argv is accepted but unused.
    train()
if __name__ == '__main__':
   tf.app.run()