import tensorflow as tf
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from nets2 import nets_factory

# Path to the TFRecord file holding the captcha training set
# (one raw grayscale image plus four int64 digit labels per example).
TFRECORD_FILE = 'F:/Idea workspace/tensorflow-learning/data/sample/tfrecord/captcha-train.tfrecord'

# Number of examples per training batch.
BATCH_SIZE = 14

# Destination path for the saved model checkpoint.
MODEL_FILE = 'F:/Idea workspace/tensorflow-learning/data/model/line/crack_captcha_line.model'


def read_tfrecord(file_path):
    """Read and decode one captcha example from a TFRecord file.

    Args:
        file_path: Path to a TFRecord file whose examples contain a raw
            image byte string under 'image' and four int64 digit labels
            under 'label0'..'label3'.

    Returns:
        A 6-tuple of tensors:
            image: float32 [224, 224] image rescaled to [-1, 1].
            image_raw: uint8 [224, 224] image, unprocessed.
            label0..label3: scalar int64 labels, one per captcha digit.
    """
    queue = tf.train.string_input_producer([file_path])
    _, serialized_example = tf.TFRecordReader().read(queue)
    features = tf.parse_single_example(serialized_example, features={
        "image": tf.FixedLenFeature([], dtype=tf.string),
        'label0': tf.FixedLenFeature([], dtype=tf.int64),
        'label1': tf.FixedLenFeature([], dtype=tf.int64),
        'label2': tf.FixedLenFeature([], dtype=tf.int64),
        'label3': tf.FixedLenFeature([], dtype=tf.int64),
    })

    # Decode the raw bytes to uint8 pixels. tf.train.shuffle_batch requires
    # a statically known shape, so reshape to the fixed 224x224 image size.
    # Assumes the images were serialized as 224*224 grayscale bytes -- TODO confirm.
    image = tf.decode_raw(features['image'], tf.uint8)
    # Unprocessed grayscale copy (the original built two identical reshape
    # ops; one is enough).
    image_raw = tf.reshape(image, [224, 224])

    # Preprocess: map pixel values from [0, 255] into [-1, 1].
    image = tf.cast(image_raw, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

    # The parsed features are already int64; the original's
    # tf.cast(..., tf.int64) calls were no-ops and have been dropped.
    label0 = features['label0']
    label1 = features['label1']
    label2 = features['label2']
    label3 = features['label3']

    return image, image_raw, label0, label1, label2, label3


# Build the single-example reading pipeline from the training TFRecord.
one_image, one_image_raw, lbl0, lbl1, lbl2, lbl3 = read_tfrecord(TFRECORD_FILE)

# shuffle_batch draws examples in randomized order while batching them.
(image_batch, image_raw_batch,
 label_batch0, label_batch1, label_batch2, label_batch3) = tf.train.shuffle_batch(
    [one_image, one_image_raw, lbl0, lbl1, lbl2, lbl3],
    batch_size=BATCH_SIZE,
    capacity=50000,
    min_after_dequeue=10000,
    num_threads=1)

# AlexNet with 40 outputs: 4 captcha digits x 10 classes each.
net_construct = nets_factory.get_network_fn(
    'alexnet_v2', 40, weight_decay=0.005, is_training=True)

# Feed placeholders: a batch of grayscale images and the four digit labels.
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.float32, [None])
y1 = tf.placeholder(tf.float32, [None])
y2 = tf.placeholder(tf.float32, [None])
y3 = tf.placeholder(tf.float32, [None])

with tf.Session() as session:
    # The network expects inputs of shape [batch_size, height, width, channels],
    # so add the single grayscale channel dimension.
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    logits, end_points = net_construct(X)

    # One-hot encode each digit label (10 classes per digit) and concatenate
    # into one [batch, 40] target matching the network's 40 logits.
    one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=10)
    one_hot_labels1 = tf.one_hot(indices=tf.cast(y1, tf.int32), depth=10)
    one_hot_labels2 = tf.one_hot(indices=tf.cast(y2, tf.int32), depth=10)
    one_hot_labels3 = tf.one_hot(indices=tf.cast(y3, tf.int32), depth=10)

    y = tf.concat([one_hot_labels0, one_hot_labels1, one_hot_labels2, one_hot_labels3], 1)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))

    train = tf.train.AdamOptimizer(1e-4).minimize(loss=loss)

    # BUG FIX: the original took argmax over the whole 40-wide vector; on a
    # 4-hot label that only ever picks the first hot position, so the metric
    # compared a single digit at best. Split labels/logits into 4 groups of
    # 10, take the per-digit argmax, and count a sample correct only when
    # all 4 digits match.
    digit_labels = tf.argmax(tf.reshape(y, [-1, 4, 10]), 2)
    digit_preds = tf.argmax(tf.reshape(logits, [-1, 4, 10]), 2)
    all_digits_correct = tf.reduce_all(tf.equal(digit_labels, digit_preds), axis=1)
    accuracy = tf.reduce_mean(tf.cast(all_digits_correct, tf.float32))

    session.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    # Coordinator manages the input-pipeline threads.
    coord = tf.train.Coordinator()

    # Start the QueueRunners; the filename queue is populated from here on.
    threads = tf.train.start_queue_runners(sess=session, coord=coord)

    for i in range(80000):
        # Pull one shuffled batch, then run a training step on it.
        image, label0, label1, label2, label3 = \
            session.run([image_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        session.run(train, feed_dict={x: image, y0: label0, y1: label1, y2: label2, y3: label3})

        if i % 2000 == 0:
            # Periodically report training loss and whole-captcha accuracy
            # on the batch just trained on.
            print(session.run([loss, accuracy], feed_dict={x: image, y0: label0, y1: label1, y2: label2, y3: label3}))

    saver.save(session, MODEL_FILE)
    # Ask the input threads to stop; join returns only after they all exit.
    coord.request_stop()
    coord.join(threads)
