import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
import cv2
import os
import time
from tensorflow.data import Dataset

def lrelu(x):
    """Leaky ReLU with a fixed negative-side slope of 0.2."""
    return tf.maximum(0.2 * x, x)
 
# Activation applied by every conv layer in the network below.
activation_fn = lrelu
 
def UNet(inputs, reg, net_type='UNet_1X'):
    """Build a 4-level U-Net encoder/decoder graph with slim conv layers.

    Args:
        inputs: 4-D NHWC input tensor. Assumes spatial dims are divisible
            by 16 (four 2x2 max-poolings) -- TODO confirm with callers.
        reg: slim weights regularizer applied to every conv layer.
        net_type: 'UNet_1X' (default; output has the same spatial size,
            1 channel) or 'UNet_3X' (3x spatial upscaling via
            depth_to_space on a 27-channel head).

    Returns:
        The output tensor of the final 1x1 conv head (after
        depth_to_space for 'UNet_3X').

    Raises:
        ValueError: if `net_type` is not one of the supported variants.
    """
    # ----- Encoder: two 3x3 convs then 2x2 max-pool, doubling channels -----
    conv1 = slim.conv2d(inputs, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv1_1', weights_regularizer=reg)
    conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv1_2', weights_regularizer=reg)
    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')

    conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv2_1', weights_regularizer=reg)
    conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv2_2', weights_regularizer=reg)
    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')

    conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv3_1', weights_regularizer=reg)
    conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv3_2', weights_regularizer=reg)
    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')

    conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv4_1', weights_regularizer=reg)
    conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv4_2', weights_regularizer=reg)
    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')

    # ----- Bottleneck -----
    conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv5_1', weights_regularizer=reg)
    conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv5_2', weights_regularizer=reg)

    # ----- Decoder: transposed-conv upsample + skip concat, then two convs -----
    up6 = upsample_and_concat(conv5, conv4, 256, 512)
    conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv6_1', weights_regularizer=reg)
    conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv6_2', weights_regularizer=reg)

    up7 = upsample_and_concat(conv6, conv3, 128, 256)
    conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv7_1', weights_regularizer=reg)
    conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv7_2', weights_regularizer=reg)

    up8 = upsample_and_concat(conv7, conv2, 64, 128)
    conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv8_1', weights_regularizer=reg)
    conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv8_2', weights_regularizer=reg)

    up9 = upsample_and_concat(conv8, conv1, 32, 64)
    conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv9_1', weights_regularizer=reg)
    conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv9_2', weights_regularizer=reg)
    print("conv9.shape:{}".format(conv9.get_shape()))

    # ----- Output head, selected by net_type -----
    with tf.variable_scope(name_or_scope="output"):
        if net_type == 'UNet_3X':  # 3x spatial upscaling of the output
            conv10 = slim.conv2d(conv9, 27, [1, 1], rate=1, activation_fn=None, scope='g_conv10', weights_regularizer=reg)
            out = tf.depth_to_space(conv10, 3)
        elif net_type == 'UNet_1X':  # output spatial dims identical to input
            out = slim.conv2d(conv9, 1, [1, 1], rate=1, activation_fn=None, scope='g_conv10', weights_regularizer=reg)
        else:
            raise ValueError("unknown net_type: {}".format(net_type))
    return out
 
def upsample_and_concat(x1, x2, output_channels, in_channels):
    """Upsample `x1` 2x with a learned transposed conv and concat with skip `x2`.

    The transposed-conv filter is a fresh tf.Variable; the result is resized
    to x2's spatial shape and concatenated along the channel axis, giving
    `output_channels * 2` channels.
    """
    stride = 2
    filt = tf.Variable(
        tf.truncated_normal([stride, stride, output_channels, in_channels],
                            stddev=0.02))
    upsampled = tf.nn.conv2d_transpose(
        x1, filt, tf.shape(x2), strides=[1, stride, stride, 1])
    merged = tf.concat([upsampled, x2], 3)
    merged.set_shape([None, None, None, output_channels * 2])
    return merged





def auto_encoding_create_dataset(image_dir_src, image_dir_label, label_extend=None,
                                 batch_size=1, epoch=None, shuffle=True):
    """Build a tf.data input pipeline over an image directory.

    Args:
        image_dir_src: directory of source images (decoded as 1-channel BMP).
        image_dir_label: directory of label images (decoded as 1-channel PNG),
            or None for an unlabeled (inference) pipeline.
        label_extend: label file extension without the dot (e.g. 'png');
            defaults to the extension of the first source file.
        batch_size: batch size for Dataset.batch.
        epoch: number of repeats; None repeats indefinitely.
        shuffle: whether to shuffle file order.

    Returns:
        The one-shot iterator's get_next() tensor(s): (image, label) when
        labels are used, a single image tensor otherwise. Returns None when
        the source directory is empty.
    """
    # Map a filename (pair) to decoded image tensor(s).
    def _parse_function(src_path, label_path=None):
        src_string = tf.read_file(src_path)
        src_decoded = tf.image.decode_bmp(src_string, channels=1)
        if image_dir_label is not None:
            label_string = tf.read_file(label_path)
            label_decoded = tf.image.decode_png(label_string, channels=1)
            return src_decoded, label_decoded
        return src_decoded

    # Build the filename dataset.
    src_dirs = os.listdir(image_dir_src)
    if not src_dirs:
        return None

    if image_dir_label is not None:
        if label_extend is None:
            # splitext keeps the leading dot; strip it so the join below
            # does not produce a double-dot filename like "name..bmp".
            label_extend = os.path.splitext(src_dirs[0])[1].lstrip('.')
        label_dirs = [os.path.splitext(item)[0] + '.' + label_extend for item in src_dirs]
        label_dirs = [os.path.join(image_dir_label, item) for item in label_dirs]
        src_dirs = [os.path.join(image_dir_src, item) for item in src_dirs]
        dataset = Dataset.from_tensor_slices((src_dirs, label_dirs))
    else:
        src_dirs = [os.path.join(image_dir_src, item) for item in src_dirs]
        dataset = Dataset.from_tensor_slices(src_dirs)
    if shuffle:
        dataset = dataset.shuffle(len(src_dirs) * 5)
    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat(epoch)

    iterator = dataset.make_one_shot_iterator()
    one_element = iterator.get_next()
    return one_element



def auto_encoding_tain(image_dir_src, image_dir_label, image_width=512, image_height=512, learning_rate=1e-3,
                       label_extend=None, batch_size=1, epoch=None, shuffle=True, log_path=None):
    """Train the U-Net with slim.learning.train.

    Builds the input pipeline, the network, a combined MSE + softmax loss,
    summaries, and runs the slim training loop (checkpoints/summaries are
    written under `log_path`). Blocks until training ends.

    Args:
        image_dir_src: directory of source (input) images.
        image_dir_label: directory of label images.
        image_width/image_height: size all images are resized to.
        learning_rate: Adam learning rate.
        label_extend: label file extension without the dot, or None.
        batch_size, epoch, shuffle: forwarded to the dataset builder.
        log_path: slim train_dir for checkpoints and summaries.
    """
    image, label = auto_encoding_create_dataset(image_dir_src, image_dir_label, label_extend, batch_size, epoch, shuffle)
    image = tf.cast(image, tf.float32)
    image = tf.image.resize_images(image, (image_height, image_width))
    label = tf.cast(label, tf.float32)
    label = tf.image.resize_images(label, (image_height, image_width))
    reg = slim.l2_regularizer(scale=0.001)
    image_out = UNet(image, reg)
    # Flatten to (batch, H*W) for the per-pixel losses below.
    label = tf.reshape(label, [-1, image_height * image_width])
    image_out = tf.reshape(image_out, [-1, image_height * image_width])
    # Binarize: labels assumed to be {0, 255} -- TODO confirm with the data.
    label_data = label / 255.0
    label_data = tf.to_int64(label_data)
    # Min-max normalize the network output, then round to {0, 1}.
    # NOTE(review): to_int64/round/one_hot all stop gradients, so
    # loss_softmax below cannot backpropagate into the network; only the
    # MSE term actually trains it. Kept as-is pending a redesign.
    image_out_data = (image_out - tf.reduce_min(image_out)) / (tf.reduce_max(image_out) - tf.reduce_min(image_out))
    image_out_data = tf.round(image_out_data)
    image_out_data = tf.to_int64(image_out_data)
    image_out_data = tf.one_hot(image_out_data, 2)

    # Despite the name, this is a mean-squared-error (L2) loss; the variable
    # name is kept to match the existing "loss_L1" summary tag.
    loss_L1 = tf.reduce_mean((label - image_out) ** 2)

    loss_softmax = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_data,
                                                                  logits=image_out_data)
    loss_softmax = tf.reduce_mean(loss_softmax)

    loss = loss_L1 + loss_softmax * 1000
    # Summaries for TensorBoard.
    with tf.name_scope("loss"):
        tf.summary.scalar("loss_total", loss)
        tf.summary.scalar("loss_L1", loss_L1)
        tf.summary.scalar("loss_softmax", loss_softmax)

    image_summary = tf.reshape(image_out, [-1, image_height, image_width, 1])
    label_summary = tf.reshape(label, [-1, image_height, image_width, 1])
    tf.summary.image("image", image)
    tf.summary.image("label", label_summary)
    tf.summary.image("image_result", image_summary)

    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = slim.learning.create_train_op(loss, optimizer)

    # Start training.
    tf.logging.set_verbosity(tf.logging.INFO)
    slim.learning.train(train_op, log_path, save_summaries_secs=60, save_interval_secs=300)


def auto_encoding_test(image_path, out_path, checkpoint_path, image_width=512, image_height=512):
    """Run U-Net inference over a directory of images and write result images.

    Restores the latest checkpoint, feeds each image through the network,
    min-max normalizes the output to 0-255, resizes it back to the source
    image's original size, and writes it to `out_path` under the same
    basename.

    Args:
        image_path: directory of input images.
        out_path: directory where result images are written.
        checkpoint_path: checkpoint file or directory containing checkpoints.
        image_width/image_height: size images are resized to for the network.
    """
    if tf.gfile.IsDirectory(checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    if checkpoint_path is None:
        # Without weights the network would emit random noise; stop here
        # (the original fell through and wrote garbage images).
        print("load checkpoint fail")
        return
    files = os.listdir(image_path)
    files = [os.path.join(image_path, item) for item in files]
    if not files:
        return
    image_input = auto_encoding_create_dataset(image_path, None, None, epoch=1, shuffle=False)
    image_input = tf.cast(image_input, tf.float32)
    image_input = tf.image.resize_images(image_input, (image_height, image_width))
    reg = slim.l2_regularizer(scale=0.001)
    imageDecode = UNet(image_input, reg)
    imageDecode = tf.reshape(imageDecode, [-1, image_height, image_width, 1])
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint_path)
        for file in files:
            # numpy .shape is (rows, cols) == (height, width).
            rows, cols = cv2.imread(file, cv2.IMREAD_GRAYSCALE).shape
            sta = time.time()
            imageMark = sess.run(imageDecode)
            print('处理时间：' + str(time.time() - sta))
            imageMark = np.reshape(imageMark, (image_height, image_width))
            # Min-max normalize to 0-255 for writing as an 8-bit image.
            imageMark = (imageMark - np.min(imageMark)) / (np.max(imageMark) - np.min(imageMark)) * 255
            # cv2.resize expects (width, height).
            imageMark = cv2.resize(imageMark, (cols, rows))
            # Clip interpolation overshoot before the uint8 cast.
            imageMark[imageMark > 255] = 255
            imageMark[imageMark < 0] = 0
            imageMark = imageMark.astype(np.uint8)
            print("处理图片文件：" + file)
            cv2.imwrite(os.path.join(out_path, os.path.basename(file)), imageMark)

if __name__ == "__main__":
    '''
    参数初值
    '''
    IMAGE_PATH = './imagedata3'  #原图路径
    LABEL_PATH = './labels3'  #标记图路径
    BATCH_SIZE = 1
    EPOCH = None                 #运行的epoch数据None表示无限运行
    IMAGE_WIDTH = 512           #图像宽度
    IMAGE_HEIGHT = 512          #图像高度
    LEARNING_RATE = 0.0002         #学习率
    LOG_PATH = "./log_unet"
    TRAIN =False
    TEST_PATH = "./image_raw"
    #TEST_PATH = "./imagedata"

    TEST_OUTPUT = "./result_unet"
    

    if(TRAIN):
        auto_encoding_tain(IMAGE_PATH,LABEL_PATH,IMAGE_WIDTH,IMAGE_HEIGHT,LEARNING_RATE,'png',BATCH_SIZE,EPOCH,True,LOG_PATH)
    else:#Test
        auto_encoding_test(TEST_PATH,TEST_OUTPUT,LOG_PATH,IMAGE_WIDTH,IMAGE_HEIGHT)
