#coding:utf-8

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3

# Preprocessed data files (training TFRecords and validation numpy dump)
TRAIN_FILE_NAME = "E:/picture/train/data.tfrecords-*"
VALIDATION_FILE_NAME = "E:/picture/validation/validation.npy"

# Path where the fine-tuned model is saved
TRAIN_FILE = 'E:/tesst/model'

# Google's pre-trained Inception-v3 checkpoint file
CKPT_FILE = 'D:/TSBrowserDownloads/inception_v3_2016_08_28/inception_v3.ckpt'

# Training hyperparameters
LEARNING_RATE = 0.0001
STEPS = 300
BATCH = 32
N_CLASSES = 5


# Scope prefixes whose parameters are NOT loaded from the pre-trained
# checkpoint: these are the re-trained fully connected layers.
CHECKPOINT_EXCLUDE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'

# Scope prefixes of the layers to train; in this fine-tuning setup they are
# the final fully connected layers.
TRAINABLE_SCOPES = 'InceptionV3/Logits,InceptionV3/AuxLogits'

#存储验证集
validation_image = []
validation_label = []
temp = []
# Collect all variables that should be restored from Google's pre-trained model.
def get_tuned_variables():
    """Return the model variables to restore from the pre-trained checkpoint.

    Any variable whose op name starts with a scope listed in
    CHECKPOINT_EXCLUDE_SCOPES (the re-trained fully connected layers) is
    skipped; every other Inception-v3 variable is loaded from the checkpoint.
    """
    skip_prefixes = tuple(scope.strip()
                          for scope in CHECKPOINT_EXCLUDE_SCOPES.split(','))
    # str.startswith accepts a tuple of prefixes, replacing the inner loop.
    return [var for var in slim.get_model_variables()
            if not var.op.name.startswith(skip_prefixes)]

# Collect the list of variables that need to be trained.
def get_trainable_variables():
    """Return every trainable variable under the scopes in TRAINABLE_SCOPES.

    For fine-tuning these are the final fully connected layers; their scope
    prefixes are used to query the TRAINABLE_VARIABLES collection.
    """
    variables_to_train = []
    for scope in (s.strip() for s in TRAINABLE_SCOPES.split(',')):
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
    return variables_to_train

# Multi-threaded TFRecord reader.
def read_my_file_format(filename_queue):
    """Read one serialized example from the queue and decode it.

    Each record is expected to hold an int64 'label' feature and an
    'image_raw' bytes feature containing a raw 299x299x3 uint8 image.
    Returns [image, label] where image is a float32 tensor in [0, 1].
    """
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    parsed = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    label = parsed['label']
    raw_bytes = tf.decode_raw(parsed['image_raw'], tf.uint8)
    # Reshape the flat byte string back into an image matrix, then rescale
    # uint8 [0, 255] to float32 [0, 1].
    image = tf.reshape(raw_bytes, [299, 299, 3])
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return [image, label]

def get_data(filename):
    """Build a shuffled (image_batch, label_batch) pipeline for `filename`.

    Two parallel readers feed tf.train.shuffle_batch_join. Note that
    tf.train.match_filenames_once creates a *local* variable, so
    tf.local_variables_initializer() must run before the queue is started.
    """
    matched = tf.train.match_filenames_once(filename)
    queue = tf.train.string_input_producer(matched, shuffle=False)

    min_after_dequeue = 1500
    capacity = min_after_dequeue + 3 * BATCH
    reader_outputs = [read_my_file_format(queue) for _ in range(2)]
    image_batch, label_batch = tf.train.shuffle_batch_join(
        reader_outputs,
        batch_size=BATCH,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return [image_batch, label_batch]

def main():
    """Fine-tune Google's pre-trained Inception-v3 on N_CLASSES classes.

    Restores all convolutional weights from CKPT_FILE, trains only the
    final fully connected layers (TRAINABLE_SCOPES), evaluates on the
    held-out validation set each step, and saves the result to TRAIN_FILE.
    """
    # Load the validation set (already shuffled). prossed[0] is an object
    # array of shape (358,), so it cannot be reshaped directly to
    # [358, 299, 299, 3]; stacking the individual images via a list fixes it.
    prossed = np.load(VALIDATION_FILE_NAME)
    validation_images = np.asarray(list(prossed[0]))
    validation_labels = prossed[1]
    print("%d validation examples is load" % (len(validation_labels)))

    # Model inputs: 299x299 RGB float images and integer class labels.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    # Define the Inception-v3 architecture (the checkpoint only stores the
    # parameter values). Strictly, evaluation should use is_training=False,
    # but the pre-trained batch-norm statistics differ from the new data,
    # so the same training graph is reused for validation.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(
            images, num_classes=N_CLASSES, is_training=True)

    # Loss and training op.
    # BUG FIX: the original computed trainable_variables but never passed it
    # to minimize(), so ALL layers were trained instead of only the final
    # fully connected layers listed in TRAINABLE_SCOPES.
    trainable_variables = get_trainable_variables()
    tf.losses.softmax_cross_entropy(
        tf.one_hot(labels, N_CLASSES), logits, weights=1.0)
    total_loss = tf.losses.get_total_loss()
    train_step = tf.train.RMSPropOptimizer(LEARNING_RATE).minimize(
        total_loss, var_list=trainable_variables)

    # Accuracy on the fed batch.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Build the training input pipeline BEFORE the initializers run:
    # tf.train.match_filenames_once (inside get_data) creates a local
    # variable that tf.local_variables_initializer() must cover.
    image_batch, label_batch = get_data(TRAIN_FILE_NAME)

    # Function restoring all non-excluded variables from Google's checkpoint.
    load_fn = slim.assign_from_checkpoint_fn(
        CKPT_FILE,
        get_tuned_variables(),
        ignore_missing_vars=True)

    # Saver for the fine-tuned model.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Load the pre-trained Google model.
        print('Loading tuned variables from %s' % CKPT_FILE)
        load_fn(sess)

        for i in range(STEPS):
            # BUG FIX: the original fed the *validation* set to train_step and
            # never consumed image_batch/label_batch, i.e. it trained on the
            # validation data. Train on TFRecord batches instead and keep the
            # validation set for evaluation only.
            train_images, train_labels = sess.run([image_batch, label_batch])
            _, loss = sess.run([train_step, total_loss],
                               feed_dict={images: train_images,
                                          labels: train_labels})

            validation_accuracy = sess.run(evaluation_step, feed_dict={
                images: validation_images, labels: validation_labels})
            print('Step %d: Training loss is %.1f Validation accuracy = %.1f%%' % (
                i, loss, validation_accuracy * 100.0))

        # BUG FIX: the Saver and TRAIN_FILE were defined but the fine-tuned
        # model was never written; persist it after training.
        saver.save(sess, TRAIN_FILE)

        coord.request_stop()
        coord.join(threads)
# Script entry point.
if __name__ == '__main__':
    main()
