# coding:utf-8
# Author : hiicy redldw
# Date : 2019/04/17
import tensorflow as tf
import numpy as np

# Number of samples in the synthetic dataset.
sample_num = 5
# Number of passes (epochs) over the dataset.
epoch_num = 2
# Number of samples per batch.
batch_size = 3
# Batches per epoch. BUG FIX: ceiling division replaces int(a / b) + 1,
# which over-counted by one whenever sample_num was an exact multiple of
# batch_size (same value as before for 5 / 3).
batch_total = -(-sample_num // batch_size)


def generate_data(sample_num=sample_num):
    """Create a synthetic dataset of random images with integer labels.

    Args:
        sample_num: number of (image, label) pairs to produce; defaults to
            the module-level ``sample_num`` constant.

    Returns:
        Tuple ``(images, labels)`` where ``images`` is a float array of
        shape ``(sample_num, 224, 224, 3)`` and ``labels`` is the integer
        sequence ``0 .. sample_num - 1``.
    """
    imgs = np.random.random([sample_num, 224, 224, 3])
    lbls = np.asarray(range(0, sample_num))
    print("image size {}, label size {}".format(imgs.shape, lbls.shape))
    return imgs, lbls
"""
def im(i):
    x =tf.layers.conv2d(i,10,(15,15),strides=20)
    x= tf.layers.flatten(x)
    return tf.layers.dense(x,1)
def los(i,j):
    return i-j
def ju(targets):
    return targets*10
img = tf.placeholder('float32',shape=[None,224,224,3])
p = im(img)
lo = los(p,label_batch)
"""
def get_batch_data(batch_size=batch_size):
    """Build a TF1.x queue-based input pipeline over the synthetic dataset.

    Args:
        batch_size: number of samples grouped into each output batch.

    Returns:
        ``(image_batch, label_batch)`` tensors dequeued from a batch queue.
        Evaluating them requires running the local-variable initializer
        (``slice_input_producer`` with ``num_epochs`` keeps a local epoch
        counter) and starting the queue runners.
    """
    images, label = generate_data()
    images = tf.cast(images, tf.float32)
    label = tf.cast(label, tf.float32)

    # slice_input_producer enqueues individual (image, label) slices;
    # shuffle=False keeps the original sample order, and num_epochs makes the
    # queue raise OutOfRangeError after epoch_num full passes.
    input_queue = tf.train.slice_input_producer([images, label], num_epochs=epoch_num, shuffle=False)

    # tf.train.batch groups batch_size dequeued slices into one batch tensor.
    # BUG FIX: removed the unused dead op `target = input_queue[1] + 10`,
    # which added a node to the graph but was never consumed.
    image_batch, label_batch = tf.train.batch([input_queue[0], input_queue[1]], batch_size)
    return image_batch, label_batch


# image_batch / label_batch are symbolic tf.Tensor objects; concrete values
# are only produced by the sess.run() calls below.
image_batch,label_batch = get_batch_data(batch_size=batch_size)

with tf.Session() as sess:
    # Global AND local initializers: slice_input_producer(num_epochs=...)
    # stores its epoch counter in a local variable, so local init is required
    # before the queue threads start.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    # start_queue_runners launches the background threads that fill the input
    # queues; without it, sess.run() on the batch tensors would block forever
    # waiting on an empty queue.
    threads = tf.train.start_queue_runners(sess,coord)
    try:
        while not coord.should_stop():
            print('******************')
            image_batch_v,label_batch_v = sess.run([image_batch,label_batch])
            print(image_batch_v.shape, label_batch_v)

            # lz,pv= sess.run([lo,p],feed_dict={img:image_batch_v,label_batch:label_batch_v})
            # print('loss',lz)
            # print('pred',pv)
            # print('label',label_batch_v)
    except tf.errors.OutOfRangeError:  # raised once the queue has delivered all num_epochs epochs
        print("done! now lets kill all the threads……")
    finally:
        # Signal every queue-runner thread to terminate.
        coord.request_stop()
        print('all threads are asked to stop!')
    coord.join(threads)  # wait for the launched threads to finish
    print('all threads are stopped!')

# import threading
# import time
# #创建一个函数实现多线程，参数为Coordinater和线程号
# def func(coord, t_id):
#     count = 0
#     while not coord.should_stop(): #不应该停止时计数
#         print('thread ID:',t_id, 'count =', count)
#         count += 1
#         time.sleep(2)
#         if count == 5: #计到5时请求终止
#             coord.request_stop()
# coord = tf.train.Coordinator()
# threads = [threading.Thread(target=func, args=(coord, i)) for i in range(4)]
# #开始所有线程
# for t in threads:
#     t.start()
# coord.join(threads) #等待所有线程结束

# batch_size = 2
# # 随机产生一个2*2的张量
# example = tf.random_normal([2, 2])
# # 创建一个RandomShuffleQueue，
# q = tf.RandomShuffleQueue(
#     capacity=1000,
#     min_after_dequeue=0,
#     dtypes=tf.float32,
#     shapes=[2, 2])
# # enqueue op，每次push一个张量
# enq_op = q.enqueue(example)

# # dequeue op, 每次取出batch_size个张量
# xs = q.dequeue_many(batch_size)

# # 创建QueueRunner，包含4个enqueue op线程
# qr = tf.train.QueueRunner(q, [enq_op] * 4)
# coord = tf.train.Coordinator()
# sess = tf.Session()
# # 启动QueueRuner，开始线程
# enq_threads = qr.create_threads(sess, coord=coord, start=True)
# for i in range(10):
#     if coord.should_stop():
#         break
#     print('step:', i, sess.run(xs))  # 打印结果
# coord.request_stop()
# coord.join(enq_threads)


# Batch size for the range_input_producer demo below.
BATCH_SIZE = 6
# Number of batches produced per epoch ("EXPOCHES" is a typo for "EPOCHS",
# but the name is kept because input_producer() references it).
NUM_EXPOCHES = 5


def input_producer():
    """Produce consecutive BATCH_SIZE-element slices of 1..N via a TF queue.

    range_input_producer enqueues the batch indices 0..NUM_EXPOCHES-1 exactly
    once (num_epochs=1, in order); each dequeue selects the next slice of the
    data. After all NUM_EXPOCHES batches are consumed, evaluating the result
    raises tf.errors.OutOfRangeError.

    Returns:
        A 1-D int tensor of length BATCH_SIZE holding the next batch.
    """
    # BUG FIX (robustness): derive the data from the constants instead of a
    # hard-coded 30-element literal, so changing BATCH_SIZE/NUM_EXPOCHES can
    # no longer desynchronize the slice bounds from the data length.
    array = list(range(1, NUM_EXPOCHES * BATCH_SIZE + 1))
    i = tf.train.range_input_producer(NUM_EXPOCHES, num_epochs=1, shuffle=False).dequeue()
    inputs = tf.slice(array, [i * BATCH_SIZE], [BATCH_SIZE])
    return inputs


class Inputs(object):
    """Thin wrapper exposing the producer's output tensor as ``inputs``."""

    def __init__(self):
        # Build the queue-backed input pipeline once at construction time.
        self.inputs = input_producer()


def main(*args, **kwargs):
    """Drive the input_producer pipeline: pull up to 10 batches and print them.

    Stops early on OutOfRangeError (epoch limit) or Ctrl-C; always requests
    the queue-runner threads to stop, joins them, and closes the session.
    """
    inputs = Inputs()
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    coord = tf.train.Coordinator()
    # BUG FIX: run the initializers BEFORE starting the queue-runner threads.
    # range_input_producer(num_epochs=...) keeps its epoch counter in a local
    # variable; starting the threads first could hit an uninitialized variable
    # (FailedPreconditionError). The L57 demo already uses the correct order.
    sess.run(init)
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        index = 0
        while not coord.should_stop() and index < 10:
            datalines = sess.run(inputs.inputs)
            index += 1
            print("step: %d, batch data: %s" % (index, str(datalines)))
    except tf.errors.OutOfRangeError:
        # BUG FIX: corrected message typo "traing" -> "training".
        print("Done training:-------Epoch limit reached")
    except KeyboardInterrupt:
        # BUG FIX: corrected message typo "interrput" -> "interrupt".
        print("keyboard interrupt detected, stop training")
    finally:
        # Always signal the threads to stop, even on unexpected errors.
        coord.request_stop()
    coord.join(threads)
    sess.close()
    del sess
# main()
