import tensorflow as tf
import numpy as np
from nets import nets_factory

# Path to the training TFRecord file. A raw string avoids fragile backslash
# escapes in the Windows path (the original relied on Python passing unknown
# escapes like "\I" and "\d" through unchanged, which warns on Python 3.6+
# and silently corrupts the path if a segment ever starts with t, n, r, ...).
TFRECORD_FILE = r'F:\Idea workspace\tensorflow-learning\data\sample\tfrecord\captcha-train.tfrecord'
# Input batch of grayscale captcha images, shape (batch, 224, 224);
# reshaped to NHWC inside create_train().
x = tf.placeholder(tf.float32, [None, 224, 224])
# One label placeholder per captcha character position. Each holds a batch
# of integer class ids (fed as float32, cast to int32 before one-hot).
y0 = tf.placeholder(tf.float32, [None])
y1 = tf.placeholder(tf.float32, [None])
y2 = tf.placeholder(tf.float32, [None])
y3 = tf.placeholder(tf.float32, [None])
# Learning rate as a Variable so the training loop can lower it via tf.assign.
lr = tf.Variable(1e-4)


def read_and_decode(filename):
    """Build graph ops that read and decode one example from a TFRecord file.

    The image bytes are decoded to uint8, given a static (224, 224) shape
    (required by tf.train.shuffle_batch) and normalized to [-1, 1].

    :param filename: path to the TFRecord file to read from.
    :return: tuple (image, label0, label1, label2, label3) where image is a
             float32 tensor of shape (224, 224) and each label is an int32
             scalar tensor (one per captcha character position).
    """
    # A filename queue feeds the reader; queue runners must be started later.
    queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(queue)

    feature_spec = {'image': tf.FixedLenFeature([], tf.string)}
    for key in ('label0', 'label1', 'label2', 'label3'):
        feature_spec[key] = tf.FixedLenFeature([], tf.int64)
    parsed = tf.parse_single_example(serialized, features=feature_spec)

    # Decode raw bytes and pin a fully-defined shape for the batching queue.
    img = tf.decode_raw(parsed['image'], tf.uint8)
    img = tf.reshape(img, [224, 224])

    # Normalize: [0, 255] -> [0, 1] -> [-0.5, 0.5] -> [-1, 1].
    img = tf.cast(img, tf.float32) / 255.0
    img = tf.multiply(tf.subtract(img, 0.5), 2.0)

    digits = [tf.cast(parsed['label%d' % idx], tf.int32) for idx in range(4)]
    return img, digits[0], digits[1], digits[2], digits[3]


# Single-example reading ops for the training TFRecord.
image, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles examples into batches of 25.
# min_after_dequeue controls shuffle quality; capacity bounds the queue size.
image_batch, label_batch0, label_batch1, label_batch2, label_batch3 = \
    tf.train.shuffle_batch([image, label0, label1, label2, label3], batch_size=25,
                           capacity=50000, min_after_dequeue=10000, num_threads=1)


def create_train():
    """Build the multi-task training graph for 4-character captcha recognition.

    Creates an AlexNet-v2 based network with four 10-way classification heads
    (one per captcha character), the averaged softmax cross-entropy loss, an
    Adam train op driven by the global `lr` variable, and per-head accuracy ops.

    :return: (train_op, accuracy1, accuracy2, accuracy3, accuracy4, saver,
              total_loss)
    """
    # -1 lets TensorFlow infer the batch dimension, so the graph works for
    # any batch size instead of only the hard-coded training batch of 25.
    X = tf.reshape(x, [-1, 224, 224, 1])
    net_construc = nets_factory.get_network_fn('alexnet_v2', 10, weight_decay=0.005, is_training=True)

    # One logits head per captcha character.
    # NOTE(review): assumes this project's nets_factory variant of alexnet_v2
    # returns four heads instead of a single logits tensor — confirm.
    net0, net1, net2, net3, end_points = net_construc(X)

    # One-hot encode the integer labels (depth 10: exactly one element is 1).
    one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=10)
    one_hot_labels1 = tf.one_hot(indices=tf.cast(y1, tf.int32), depth=10)
    one_hot_labels2 = tf.one_hot(indices=tf.cast(y2, tf.int32), depth=10)
    one_hot_labels3 = tf.one_hot(indices=tf.cast(y3, tf.int32), depth=10)

    # Per-head cross-entropy, averaged into one scalar training objective.
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net0, labels=one_hot_labels0))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net1, labels=one_hot_labels1))
    loss3 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net2, labels=one_hot_labels2))
    loss4 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net3, labels=one_hot_labels3))

    total_loss = (loss1 + loss2 + loss3 + loss4) / 4

    train = tf.train.AdamOptimizer(lr).minimize(total_loss)

    # Per-head accuracy: fraction of the batch where argmax(logits) matches
    # the label's one-hot argmax.
    accuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(one_hot_labels0, 1), tf.argmax(net0, 1)), tf.float32))
    accuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(one_hot_labels1, 1), tf.argmax(net1, 1)), tf.float32))
    accuracy3 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(one_hot_labels2, 1), tf.argmax(net2, 1)), tf.float32))
    accuracy4 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(one_hot_labels3, 1), tf.argmax(net3, 1)), tf.float32))

    saver = tf.train.Saver()

    return train, accuracy1, accuracy2, accuracy3, accuracy4, saver, total_loss


# Demonstration of tf.squeeze (drops size-1 dimensions).
def squeeze_test():
    """Show how tf.squeeze removes dimensions of size 1.

    Builds a (1, 4, 1) array, prints its shape and contents, then returns
    two squeezed tensors: one squeezing axes [2, 0] explicitly, and one
    squeezing every size-1 axis.

    :return: tuple (squeezed over axes [2, 0], squeezed over all axes)
    """
    sample = np.array([[[0], [1], [2], [5]]])
    tensor = tf.convert_to_tensor(sample)
    print(np.shape(sample))
    print(sample)
    squeezed_axes = tf.squeeze(tensor, [2, 0])
    squeezed_all = tf.squeeze(tensor)
    return squeezed_axes, squeezed_all


def run():
    """Train the captcha model, reporting metrics every 20 iterations and
    saving a checkpoint once accuracy is high enough or training ends."""
    train, accuracy1, accuracy2, accuracy3, accuracy4, saver, total_loss = create_train()

    with tf.Session() as session:
        # Initialize all graph variables.
        session.run(tf.global_variables_initializer())

        # Coordinator manages the input-pipeline threads.
        coord = tf.train.Coordinator()

        # Start the queue runners so the filename/example queues are fed.
        threads = tf.train.start_queue_runners(sess=session, coord=coord)

        for i in range(10001):

            # Fetch one batch of images and labels from the shuffle queue.
            b_image, b_label0, b_label1, b_label2, b_label3 = session.run(
                [image_batch, label_batch0, label_batch1, label_batch2, label_batch3])

            feed = {x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3}

            # One optimization step.
            session.run(train, feed_dict=feed)

            # Compute loss and accuracy every 20 iterations.
            if i % 20 == 0:
                # Halve the learning rate every 2000 iterations. Skip step 0:
                # the original code halved lr before any training had happened.
                # NOTE(review): tf.assign here adds a new op to the graph on
                # every call; consider building the assign op once outside
                # the loop.
                if i % 2000 == 0 and i > 0:
                    session.run(tf.assign(lr, lr / 2))

                # BUG FIX: previously this evaluation sat inside the
                # `i % 2000 == 0` branch, so metrics printed only every 2000
                # steps (despite the every-20 comment) and the save/stop
                # check below compared accuracies up to 2000 iterations stale.
                acc0, acc1, acc2, acc3, loss_ = session.run(
                    [accuracy4, accuracy1, accuracy2, accuracy3, total_loss],
                    feed_dict=feed)
                print("Iter:%d  Loss:%.3f  Accuracy:%.2f,%.2f,%.2f,%.2f  " % (
                    i, loss_, acc0, acc1, acc2, acc3))

                # Save and stop at the last step or once mean accuracy > 0.99.
                if i == 10000 or (acc0 + acc1 + acc2 + acc3) / 4 > 0.99:
                    saver.save(session,
                               "F:/Idea workspace/tensorflow-learning/data/model/multi-task/crack_captcha.model",
                               global_step=i)
                    break

        # Ask the pipeline threads to stop...
        coord.request_stop()
        # ...and wait until they have all shut down before returning.
        coord.join(threads)


# Script entry point: build the graph and run the training loop.
if __name__ == '__main__':
    run()
