 # -*- coding: utf-8 -*-
from glob import glob 
import os
import tensorflow as tf
import numpy as np
import cv2 
import matplotlib.pyplot as plt
#from PIL import Image
import scipy
import scipy.stats as st
from PIL import Image 
from architecture import *
import time

# Pin TensorFlow to GPU #2 (CUDA device ordinal).
os.environ['CUDA_VISIBLE_DEVICES']='2'

# Constant added to discriminator logits inside the WGAN losses (currently 0).
EPSILON = 0

# Spatial size the input images are resized to.
input_height = 64
input_width = 64

# Number of images per training batch.
batch_size = 9

# Grid dimensions (rows x cols) used by img_save when tiling sample images.
total_height = 2
total_width = 2

# Dimensionality of the latent vector z.
Z_DIM = 100

# NOTE(review): matix_size/matrix_pad appear unused in this file.
matix_size= 64
matrix_pad = 8

# Length of the masked strip used by draw_line()/draw_horizontal_line().
line_height = input_height

# Adam optimizer hyper-parameters (beta1 is called "momentum" here).
learning_rate = 0.00021
momentum = 0.5

# Output roots; each gets a per-measurement subfolder in __main__.
checkpoints_path = './checkpoints/'
graph_path = './graphs/'
images_path = './images/'
measurement = 'patch_generator'


# Resume from the latest checkpoint instead of starting fresh.
continue_training = False
epochs =1000

assist_variable = 1

# NOTE(review): the block below duplicates/overlaps the constants above
# (Z_DIM is redefined to the same value; BATCHSIZE/IMG_*/MASK_* are only
# partially used -- IMG_C is read by Generator2).
BATCHSIZE = 128
IMG_H = 64
IMG_W = 64
IMG_C = 3
Z_DIM = 100
MASK_H = 32
MASK_W = 32

def patch_generator(input, k_size=16):
    """Blank out a random k_size x k_size square patch of an image with 1s.

    Builds a binary mask that is 0 inside a randomly placed square and 1
    elsewhere, then returns input*mask + (1-mask): the patch region is
    painted white (value 1) and the rest of the image is kept.

    Args:
        input: 3-D image tensor (H, W, C) or 4-D batch (N, H, W, C).
            Assumes square spatial dims -- the padding below uses shape[1]
            for both axes.
        k_size: side length in pixels of the square patch.

    Returns:
        (res, padded): the masked image(s) and the mask tensor. For a
        batched input, `padded` is the mask of the LAST example only.
    """
    shape = input.get_shape().as_list()
    if len(shape) == 3:
        # Zero patch that gets padded out to full image size with 1s.
        patch = tf.zeros([k_size, k_size, shape[-1]], dtype=tf.float32)
        
        # Random offsets snapped to {0, 32}: uniform int, mod 2, times 32.
        rand_num = tf.random_uniform([2], minval=0, maxval=shape[1]-k_size, dtype=tf.int32)%2*32

        h_, w_ = rand_num[0], rand_num[1]
        print("###############",h_, w_)  # prints symbolic tensors, not values

        # Each axis pads out to shape[1] total (square assumption).
        padding = [[h_, shape[1]-h_-k_size], [w_, shape[1]-w_-k_size], [0, 0]]
        padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)

        # Keep pixels where mask==1; paint 1s where mask==0.
        res = tf.multiply(input, padded) + (1-padded)
    else:
        patch = tf.zeros([k_size, k_size, shape[-1]], dtype=tf.float32)
        res = []
        for idx in range(0,shape[0]):
            # Independent random patch position per batch element.
            rand_num = tf.random_uniform([2], minval=0, maxval=shape[1]-k_size, dtype=tf.int32)%2*32
            h_, w_ = rand_num[0], rand_num[1]

            padding = [[h_, shape[1]-h_-k_size], [w_, shape[1]-w_-k_size], [0, 0]]
            padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)

            res.append(tf.multiply(input[idx], padded) + (1-padded))
        res = tf.stack(res)
    return res, padded


def draw_line(input, k_size=1):
    """Blank out a random vertical strip of width `k_size` with 1s.

    Builds a binary mask that is 0 on a line_height x k_size strip at a
    random horizontal offset and 1 elsewhere, then returns
    input*mask + (1-mask), mirroring patch_generator's semantics.

    Args:
        input: 3-D image tensor (H, W, C) or 4-D batch (N, H, W, C).
        k_size: width in pixels of the vertical strip.

    Returns:
        (res, padded): masked image(s) and the mask; for a batched input,
        `padded` is the mask of the LAST example only (same convention as
        patch_generator).
    """
    shape = input.get_shape().as_list()

    def _strip_mask(patch, h_, w_, height, width):
        # BUG FIX: the original padded with constant_values=0, which made
        # the whole mask zero (everything was blanked to 1s), and its
        # first-axis padding [h_, height-line_height] did not sum to the
        # image height for h_ > 0. Pad the zero strip with 1s and make
        # each axis total the full image size.
        padding = [[h_, height - h_ - line_height],
                   [w_, width - w_ - k_size], [0, 0]]
        return tf.pad(patch, padding, "CONSTANT", constant_values=1)

    if len(shape) == 3:
        patch = tf.zeros([line_height, k_size, shape[-1]], dtype=tf.float32)
        h_ = shape[0] - line_height
        numb = tf.random_uniform([1], minval=0, maxval=shape[1]-k_size, dtype=tf.int32)
        w_ = numb[0]
        padded = _strip_mask(patch, h_, w_, shape[0], shape[1])
        res = tf.multiply(input, padded) + (1 - padded)
    else:
        patch = tf.zeros([line_height, k_size, shape[-1]], dtype=tf.float32)
        res = []
        for idx in range(0, shape[0]):
            # BUG FIX: the original used shape[0] (the batch size) as the
            # image height; spatial dims of a 4-D batch are shape[1:3].
            h_ = shape[1] - line_height
            numb = tf.random_uniform([1], minval=0, maxval=shape[2]-k_size, dtype=tf.int32)
            w_ = numb[0]
            padded = _strip_mask(patch, h_, w_, shape[1], shape[2])
            res.append(tf.multiply(input[idx], padded) + (1 - padded))
        res = tf.stack(res)
    return res, padded


def draw_horizontal_line(input, k_size=3):
    """Blank out a random horizontal strip of thickness `k_size` with 1s.

    Builds a binary mask that is 0 on a k_size x line_height strip at a
    random vertical offset (snapped to multiples of 15) and 1 elsewhere,
    then returns input*mask + (1-mask).

    Assumes square spatial dims -- the padding arithmetic uses shape[1]
    for both axes -- and relies on line_height == shape[1] so the strip
    spans the full image width (w_ becomes 0 below).

    Args:
        input: 3-D image tensor (H, W, C) or 4-D batch (N, H, W, C).
        k_size: thickness in pixels of the horizontal strip.

    Returns:
        (res, padded): masked image(s) and the mask; for a batched input,
        `padded` is the mask of the LAST example only.
    """
    shape = input.get_shape().as_list()
    if len(shape) == 3:
        patch = tf.zeros([k_size,line_height, shape[-1]], dtype=tf.float32)
        print("patch is :",patch)
        # Random vertical offset in {0, 15, 30, 45}.
        numb = tf.random_uniform([1], minval=0,maxval=shape[1]-k_size, dtype=tf.int32)%4*15
        h_ = numb[0] 
        # 0 when line_height == shape[1] (the full-width case).
        w_ = shape[1]-line_height
        padding = [[h_, shape[1]-h_-k_size], [w_, shape[1]-line_height-w_ ], [0, 0]]

        print("padding is :",padding)
        padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)
        res = tf.multiply(input, padded)+ (1-padded)
    else:
        patch = tf.zeros([k_size,line_height, shape[-1]], dtype=tf.float32)
        res = []
        for idx in range(0,shape[0]):
            # Independent random line position per batch element.
            numb = tf.random_uniform([1], minval=0, maxval=shape[1]-k_size, dtype=tf.int32)%4*15
            h_ = numb[0]
            w_ = shape[1]-line_height
            padding = [[h_, shape[1]-h_-k_size], [w_,shape[1]-w_-line_height], [0, 0]]
            padded = tf.pad(patch, padding, "CONSTANT", constant_values=1)
            res.append(tf.multiply(input[idx], padded) + (1-padded))
        res = tf.stack(res)
    return res, padded

def load_data(original_path):
    """Build a TF1 queue-based input pipeline over `<original_path>/64_64/*.png`.

    Each PNG is resized to (input_height, input_width), rescaled to
    [-1, 1], and masked with a random square patch from patch_generator
    (mask is 1 where the image is kept, 0 inside the patch).

    Args:
        original_path: directory containing a `64_64/` folder of PNG files.

    Returns:
        (train_batch, masks, data_count): shuffled batch tensors of masked
        images and their masks, plus the number of files found on disk.
    """
    paths = os.path.join(original_path, "./64_64/*.png")
    data_count = len(glob(paths))
    # Deprecated TF1 queue-based reader pipeline (pre-tf.data).
    filename_queue = tf.train.string_input_producer(tf.io.match_filenames_once(paths))
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    images = tf.image.decode_png(image_file, channels=3)

    #print(images.eval())
    #input image range from -1 to 1
    #center crop 32x32 since raw images are not center cropped.
    #images = tf.image.central_crop(images, 0.5)

    # tf.image.resize outputs float32 in [0, 255]; convert_image_dtype is
    # then a no-op on float input, so /127.5 - 1 maps pixels into [-1, 1].
    images = tf.image.resize(images ,[input_height,input_width])
    images = tf.image.convert_image_dtype(images, dtype=tf.float32) / 127.5 - 1
    print("*************image is 1:",images)

    #apply measurement models

    images, mask = patch_generator(images, k_size=28)
    #images, mask = conv_noise(images)

    # Fix the mask's static shape (square; uses input_height for both axes).
    mask = tf.reshape(mask, [input_height, input_height, 3])
    mask = tf.image.convert_image_dtype(mask, dtype=tf.float32)

    train_batch, masks = tf.train.shuffle_batch([images, mask],
                                         batch_size=batch_size,
                                         capacity=batch_size*2,
                                         min_after_dequeue=batch_size
                                        )

    return train_batch, masks, data_count


def cal_tf_psnr(im1, im2):
    """Peak signal-to-noise ratio between two image batches (max_val=1.0)."""
    return tf.image.psnr(im1, im2, max_val=1.0)


def img_save(epoch, imgs, aspect_ratio=0.5, tile_shape=(total_height, total_width), border=1, border_color=0, name="input"):
    """Tile a batch of images into one grid and write it as a PNG.

    Args:
        epoch: int, embedded in the output filename.
        imgs: array of shape (N, H, W[, C]) with pixel values in [-1, 1].
        aspect_ratio: target grid aspect ratio when tile_shape is None.
        tile_shape: (rows, cols) of the grid, or None to derive it from N.
            (Default changed from a mutable list to an equivalent tuple.)
        border: pixel width of the gap between tiles.
        border_color: fill value for the gaps.
        name: tag appended to the output filename.
    """
    if imgs.ndim != 3 and imgs.ndim != 4:
        raise ValueError('imgs has wrong number of dimensions.')
    n_imgs = imgs.shape[0]

    img_shape = np.array(imgs.shape[1:3])
    if tile_shape is None:
        # Derive a grid whose aspect ratio is roughly `aspect_ratio`.
        # BUG FIX: the original computed tile_height/tile_width and then
        # discarded them for a hard-coded (4, 5) grid; use the computed shape.
        img_aspect_ratio = img_shape[1] / float(img_shape[0])
        aspect_ratio *= img_aspect_ratio
        tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
        tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
        grid_shape = np.array((tile_height, tile_width))
    else:
        assert len(tile_shape) == 2
        grid_shape = np.array(tile_shape)

    # Tile image shape: tiles plus single-pixel borders between them.
    tile_img_shape = np.array(imgs.shape[1:])
    tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border

    # Assemble tile image, pre-filled with the border color.
    tile_img = np.empty(tile_img_shape)
    tile_img[:] = border_color

    for i in range(grid_shape[0]):
        for j in range(grid_shape[1]):
            img_idx = j + i * grid_shape[1]
            if img_idx >= n_imgs:
                break
            # Swap channel order before pasting into the grid.
            # (Removed a dead `image1 = (img + 1)*127.5` branch whose two
            # arms were identical and whose result was never used.)
            img = cv2.cvtColor(imgs[img_idx], cv2.COLOR_BGR2RGB)

            yoff = (img_shape[0] + border) * i
            xoff = (img_shape[1] + border) * j
            tile_img[yoff:yoff + img_shape[0], xoff:xoff + img_shape[1], ...] = img

    # Map from [-1, 1] back to [0, 255] before writing to disk.
    cv2.imwrite(images_path + "/img_" + str(epoch) + "_" + name + ".png", (tile_img + 1) * 127.5)



class Generator2:
    """DCGAN-style generator mapping a latent vector Z to a 64x64xIMG_C image."""

    def __init__(self, name):
        self.name = name

    def __call__(self, Z):
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            # Project the latent vector and reshape to a 4x4x512 feature map.
            net = tf.nn.relu(fully_connected("linear", Z, 4 * 4 * 512))
            net = tf.reshape(net, [-1, 4, 4, 512])
            # Four stride-2 transposed convolutions: 4 -> 8 -> 16 -> 32 -> 64.
            net = tf.nn.relu(InstanceNorm(uconv("deconv1", net, 256, 5, 2), "IN1"))
            net = tf.nn.relu(InstanceNorm(uconv("deconv2", net, 128, 5, 2), "IN2"))
            net = tf.nn.relu(InstanceNorm(uconv("deconv3", net, 64, 5, 2), "IN3"))
            # Output head: tanh image in [-1, 1].
            return tf.nn.tanh(uconv("deconv4", net, IMG_C, 5, 2))

    @property
    def var(self):
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)


class Generator:
    """Inpainting generator: conv encoder -> dilated bottleneck -> deconv decoder.

    Output has the input's spatial size with 3 tanh channels in [-1, 1].
    All variable-scope/layer names match the original implementation so
    existing checkpoints stay loadable.
    """

    def __init__(self, name):
        self.name = name
        self.batch_size = batch_size

    def __call__(self, input, reuse=False):
        """Build the graph for `input` (N, H, W, C); returns (image, nets).

        `nets` is kept for interface compatibility; it is never populated
        (the original also returned it empty).
        """
        input_shape = input.get_shape().as_list()
        nets = []
        with tf.variable_scope(self.name, reuse=reuse):

            def _conv_block(x, filters, kernel, tag, stride=1):
                # conv2d -> batch_norm -> ReLU, names matching the original layout.
                x = conv2d(x, filters, kernel=kernel, stride=stride,
                           padding="SAME", name="conv" + tag)
                x = batch_norm(x, name="conv_bn" + tag)
                return tf.nn.relu(x)

            # Encoder: two stride-2 stages downsample H -> H/2 -> H/4.
            net = _conv_block(input, 64, 5, "1")
            net = _conv_block(net, 128, 3, "2", stride=2)
            net = _conv_block(net, 128, 3, "3")
            net = _conv_block(net, 256, 3, "4", stride=2)
            net = _conv_block(net, 256, 3, "5")
            # BUG FIX: the original fed conv5 into batch_norm/relu here,
            # silently discarding the conv6 convolution; wire conv6 properly.
            net = _conv_block(net, 256, 3, "6")

            # Dilated bottleneck: receptive field grows at fixed resolution.
            for i, rate in enumerate((2, 4, 8, 16), start=1):
                net = dilate_conv2d(net,
                                    [self.batch_size, net.get_shape()[1], net.get_shape()[2], 256],
                                    rate=rate,
                                    name="dilate_conv%d" % i)

            net = _conv_block(net, 256, 3, "7")
            net = _conv_block(net, 256, 3, "8")

            # Decoder: two stride-2 transposed convs restore the input size.
            net = deconv2d(net, 4, [self.batch_size, int(input_shape[1]/2), int(input_shape[2]/2), 128], name="deconv1")
            net = tf.nn.relu(batch_norm(net, name="deconv_bn1"))
            net = _conv_block(net, 128, 3, "9")

            net = deconv2d(net, 4, [self.batch_size, input_shape[1], input_shape[2], 64], name="deconv2")
            net = tf.nn.relu(batch_norm(net, name="deconv_bn2"))
            net = _conv_block(net, 32, 3, "10")

            # Output head: 3-channel tanh image.
            net = conv2d(net, 3, kernel=3, stride=1, padding="SAME", name="conv11")
            net = tf.nn.tanh(batch_norm(net, name="conv_bn11"))

            return net, nets

    @property
    def var(self):
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)

class Generator3:
    """Second inpainting generator, identical architecture to Generator but
    with its own variable names (conv3_* / deconv3_*) so the two nets do
    not share weights. Output is a 3-channel tanh image in [-1, 1].
    """

    def __init__(self, name):
        self.name = name
        self.batch_size = batch_size

    def __call__(self, input, reuse=False):
        """Build the graph for `input` (N, H, W, C); returns (image, nets).

        `nets` is kept for interface compatibility; it is never populated.
        """
        input_shape = input.get_shape().as_list()
        nets = []
        with tf.variable_scope(self.name, reuse=reuse):

            def _conv_block(x, filters, kernel, tag, stride=1):
                # conv2d -> batch_norm -> ReLU, names matching the original layout.
                x = conv2d(x, filters, kernel=kernel, stride=stride,
                           padding="SAME", name="conv" + tag)
                x = batch_norm(x, name="conv_bn" + tag)
                return tf.nn.relu(x)

            # Encoder: two stride-2 stages downsample H -> H/2 -> H/4.
            net = _conv_block(input, 64, 5, "3_1")
            net = _conv_block(net, 128, 3, "3_2", stride=2)
            net = _conv_block(net, 128, 3, "3_3")
            net = _conv_block(net, 256, 3, "3_4", stride=2)
            net = _conv_block(net, 256, 3, "3_5")
            # BUG FIX: the original fed conv5 into batch_norm/relu here,
            # silently discarding the conv3_6 convolution; wire it properly.
            net = _conv_block(net, 256, 3, "3_6")

            # Dilated bottleneck: receptive field grows at fixed resolution.
            for i, rate in enumerate((2, 4, 8, 16), start=1):
                net = dilate_conv2d(net,
                                    [self.batch_size, net.get_shape()[1], net.get_shape()[2], 256],
                                    rate=rate,
                                    name="dilate_conv3_%d" % i)

            net = _conv_block(net, 256, 3, "3_7")
            net = _conv_block(net, 256, 3, "3_8")

            # Decoder: two stride-2 transposed convs restore the input size.
            net = deconv2d(net, 4, [self.batch_size, int(input_shape[1]/2), int(input_shape[2]/2), 128], name="deconv3_1")
            net = tf.nn.relu(batch_norm(net, name="deconv_bn3_1"))
            net = _conv_block(net, 128, 3, "3_9")

            net = deconv2d(net, 4, [self.batch_size, input_shape[1], input_shape[2], 64], name="deconv3_2")
            net = tf.nn.relu(batch_norm(net, name="deconv_bn3_2"))
            net = _conv_block(net, 32, 3, "3_10")

            # Output head: 3-channel tanh image.
            net = conv2d(net, 3, kernel=3, stride=1, padding="SAME", name="conv3_11")
            net = tf.nn.tanh(batch_norm(net, name="conv_bn3_11"))

            return net, nets

    @property
    def var(self):
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)


class D_net:
    """Four-layer convolutional discriminator producing one WGAN critic logit.

    Layer/scope names match the original so checkpoints stay loadable.
    """

    def __init__(self, name):
        self.name = name
        self.batch_size = batch_size

    def __call__(self, input, reuse=False):
        """Build the graph; returns (logit, nets) where `nets` collects the
        intermediate activations (usable for feature-matching losses)."""
        nets = []
        with tf.variable_scope(self.name, reuse=reuse):
            conv1 = tf.contrib.layers.conv2d(input, 64, 5, 2,
                                     padding="VALID",
                                     activation_fn=None,
                                     scope="conv1")
            conv1 = tf.nn.relu(batch_norm(conv1, name="bn1"))
            nets.append(conv1)

            conv2 = tf.contrib.layers.conv2d(conv1, 128, 5, 2,
                                     padding="VALID",
                                     activation_fn=None,
                                     scope="conv2")
            conv2 = tf.nn.relu(batch_norm(conv2, name="bn2"))
            nets.append(conv2)

            conv3 = tf.contrib.layers.conv2d(conv2, 256, 5, 2,
                                     padding="VALID",
                                     activation_fn=None,
                                     scope="conv3")
            # BUG FIX: the original normalized conv1 here, discarding the
            # conv3 convolution output entirely; normalize conv3 itself.
            conv3 = tf.nn.relu(batch_norm(conv3, name="bn3"))
            nets.append(conv3)

            conv4 = tf.contrib.layers.conv2d(conv3, 512, 5, 2,
                                     padding="VALID",
                                     activation_fn=None,
                                     scope="conv4")
            conv4 = tf.nn.relu(batch_norm(conv4, name="bn4"))
            nets.append(conv4)

            # Flatten and project to a single unbounded critic score.
            flatten = tf.contrib.layers.flatten(conv4)
            output = linear(flatten, 1, name="linear")
            return output, nets

    @property
    def var(self):
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)


class T_train():
    """Wires together the input pipelines, the two generators, the shared
    discriminator, their WGAN-style losses and TensorBoard summaries."""

    def __init__(self,path):
        # Two independent queue pipelines: (masked image, mask, file count).
        self.Y_r, self.masks, self.data_count = load_data(path)
        self.Y_r1, self.masks1, self.data_count1 = load_data(path)
        
        self.generator = Generator("generator")
        self.generator2 =Generator3("generator2")

        self.discriminator= D_net("discriminator")
        self.build_model()
        self.build_loss()

        #self.random_z = tf.placeholder(tf.float32, [None, Z_DIM])  # dimensions defined by the generator network
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) 
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        #self.cal_distance_sum = tf.summary.scalar("cal_distance", self.cal_distance)

        # Image summaries (protocol buffers) for TensorBoard.
        self.Y_r_sum = tf.summary.image("input_img", self.Y_r, max_outputs=5)
        self.X_g_sum = tf.summary.image("X_g1", self.X_g1, max_outputs=5)
        self.Y_g_sum = tf.summary.image("Y_g", self.Y_g, max_outputs=5)

        print('##############',np.shape(self.Y_r))

    def measurement_fn(self, input, name="measurement_fn"):
        # Patch-mask measurement model (not called in the current build path).
        with tf.variable_scope(name) as scope:
                return patch_generator(input, k_size=32)

    def build_model(self):
        """Build both generators, their mask-blended outputs, and all four
        discriminator logit heads (shared weights via reuse=True)."""
        #z = np.random.standard_normal([batch_size, Z_DIM])
        self.z_in = tf.get_variable("z_in", [1, Z_DIM], initializer=tf.random_normal_initializer())

        # NOTE(review): both generators are fed the MASK tensor rather than
        # the masked image -- confirm this is intentional.
        self.X_g1, self.g_nets = self.generator(self.masks)
        # Composite: keep real pixels where mask==1, generated elsewhere.
        self.X_g1 = (1 - self.masks)*self.X_g1 + self.masks*self.Y_r

        self.X_g2, self.g_nets2 = self.generator2(self.masks)
        self.X_g2 = (1 - self.masks1)*self.X_g2 + self.masks1*self.Y_r1

        # Cross-blend of the two generators' composites using mask 1.
        self.Y_g= self.X_g1*self.masks1 + (1 - self.masks1)*self.X_g2
        self.Y_g2 = self.X_g2
        #self.Y_g2 = self.X_g2*self.masks + (1 - self.masks)*self.X_g1
        
        self.fake_d_logits, self.fake_d_net = self.discriminator(self.Y_g)
        self.fake_d2_logits, self.fake2_d_net = self.discriminator(self.Y_g2, reuse=True)
        
        # NOTE(review): this overwrites self.fake2_d_net assigned just above.
        self.fake_d_X1_logits, self.fake2_d_net = self.discriminator(self.X_g1, reuse=True)

        self.real_d_logits, self.real_d_net = self.discriminator(self.Y_r, reuse=True)
        
        trainable_vars = tf.trainable_variables()
        
        # Partition trainables into generator vs discriminator variable lists
        # (both generators share the "generator" name prefix test below).
        self.g_vars = []
        self.d_vars = []
        self.g_vars2 = []
        self.d_vars2 = []
        self.psnr_vars=[]

        for var in trainable_vars:
            if "generator" in var.name:
                self.g_vars.append(var)
                self.g_vars2.append(var)
            else:
                self.d_vars.append(var)
                self.d_vars2.append(var)

    def build_loss(self):
        """Define WGAN-style critic/generator losses for both G/D pairs and
        a PSNR consistency term between the two generator outputs."""
        def calc_loss(logits, label):
            # Standard GAN sigmoid cross-entropy (kept for reference; unused).
            if label==1:
                y = tf.ones_like(logits)
            else:
                y = tf.zeros_like(logits)
            return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))

        #GAN loss
        #self.real_d_loss = calc_loss(self.real_d_logits, 1)
        #self.fake_d_loss = calc_loss(self.fake_d_logits, 0)

        # WGAN losses (more stable); EPSILON is currently 0.
        self.real_d_loss = tf.reduce_mean(self.real_d_logits+EPSILON)
        
        self.fake_d_loss = tf.reduce_mean(self.fake_d_logits+EPSILON)
        self.fake_d2_loss = tf.reduce_mean(self.fake_d2_logits+EPSILON)
        self.fake_d_X1_loss = tf.reduce_mean(self.fake_d_X1_logits+EPSILON)

        #self.d_loss = (self.fake_d_loss+self.fake_d2_loss)/2
        #self.g_loss = -self.fake_d_loss+self.fake_d2_loss

        # Pair 1: critic separates Y_g from Y_g2; G maximizes its score.
        self.d_loss = -self.fake_d2_loss+self.fake_d_loss
        self.g_loss = -self.fake_d_loss
        
        # Pair 2: critic separates X_g1 from Y_g2.
        self.d_loss2 = -self.fake_d_X1_loss +self.fake_d2_loss
        self.g_loss2 = -self.fake_d2_loss

        # PSNR between the two generators' composites (consistency signal).
        self.psnr = cal_tf_psnr(self.X_g1,self.X_g2)
        self.psnr_loss = tf.reduce_mean(self.psnr)

def RT(sess, model):
    """Run the alternating training loop for both GAN pairs.

    Restores the latest checkpoint when continue_training is set, starts
    the TF1 input-queue runner threads, alternates between the (G, D) and
    (G2, D2) optimizer pairs on even/odd iterations, periodically saves
    sample image tiles and checkpoints, and finally plots the loss curves.
    """

    # One Adam optimizer per loss; the G/D pairs are run alternately below.
    g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=momentum, name="AdamOptimizer_G").minimize(model.g_loss, var_list=model.g_vars)
    d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=momentum, name="AdamOptimizer_D").minimize(model.d_loss, var_list=model.d_vars)

    g_optimizer2 = tf.train.AdamOptimizer(learning_rate, beta1=momentum, name="AdamOptimizer_G2").minimize(model.g_loss2, var_list=model.g_vars2)
    d_optimizer2 = tf.train.AdamOptimizer(learning_rate, beta1=momentum, name="AdamOptimizer_D2").minimize(model.d_loss2, var_list=model.d_vars2)

    # NOTE(review): no var_list here, so this would update ALL trainables;
    # it is never executed (its sess.run call is commented out below).
    psnr_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=momentum, name="AdamOptimizer_psnr").minimize(model.psnr_loss)

    # WGAN weight clipping ops. NOTE(review): clip_D is built but never run.
    clip_D = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in model.d_vars]

    epoch = 0
    step = 0
    global_step = 0

    saver = tf.train.Saver() 
    if continue_training:
        # Resume: the epoch counter is parsed from the checkpoint filename.
        last_ckpt = tf.train.latest_checkpoint(checkpoints_path+"/model")
        saver.restore(sess, last_ckpt)
        ckpt_name = str(last_ckpt)
        print("Loaded model file from " + ckpt_name)
        epoch = int(ckpt_name.split('-')[-1])
        tf.local_variables_initializer().run()
    else:
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

    # Start the TF1 filename-queue reader threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    all_summary = tf.summary.merge([model.Y_r_sum,
                                    model.X_g_sum,
                                    model.Y_g_sum, 
                                    model.d_loss_sum,
                                    model.g_loss_sum])

    writer = tf.summary.FileWriter(graph_path, sess.graph)
    
    train_loss_G = []
    train_loss_D = []
    #training starts here
    d_loss =0
    g_loss=0
    i = 0

    while epoch < epochs:
        i = i+1
        # Even iterations train pair 1 (G, D); odd iterations train pair 2.
        #Update Discriminator
        if i%2==0:
            summary, d_loss, _ = sess.run([all_summary, model.d_loss, d_optimizer])
            writer.add_summary(summary, global_step)
            
            # Generator is updated twice per discriminator update.
            summary, g_loss, _ = sess.run([all_summary, model.g_loss, g_optimizer])
            writer.add_summary(summary, global_step)
            
            #Update Generator Again
            summary, g_loss, _ = sess.run([all_summary, model.g_loss, g_optimizer])
            writer.add_summary(summary, global_step)
            print("Epoch [%d] Step [%d] G Loss: [%.4f] D Loss: [%.4f]" % (epoch, step, g_loss, d_loss))
            
        if i%2!=0:
            summary, d_loss2, _ = sess.run([all_summary, model.d_loss2, d_optimizer2])
            writer.add_summary(summary, global_step)
            
            summary, g_loss2, _ = sess.run([all_summary, model.g_loss2, g_optimizer2])
            writer.add_summary(summary, global_step)#Update Generator
            
            #Update Generator Again
            summary, g_loss2, _ = sess.run([all_summary, model.g_loss2, g_optimizer2])
            #writer.add_summary(summary, global_step)
            print("Epoch [%d] Step [%d] G Loss2: [%.4f] D Loss2: [%.4f]" % (epoch, step, g_loss2, d_loss2))
        #summary, psnr_loss, _ = sess.run([all_summary, model.psnr_loss, psnr_optimizer])
        #writer.add_summary(summary, global_step)
        #g_loss=(g_loss+g_loss2)*0.5
        
        # Record pair-1 losses only (pair-2 losses are printed, not stored).
        train_loss_D.append(d_loss)
        train_loss_G.append(g_loss)

        #print("Epoch [%d] Step [%d] PSNR Loss:"%(epoch, step), psnr_loss)
        #if step*args.batch_size >= model.data_count:
        if step % 5 == 0:
            # Checkpoint once enough steps cover the dataset size.
            if step*batch_size >= model.data_count:
                saver.save(sess, checkpoints_path + "/model", global_step=epoch)
            imgs = sess.run([model.Y_r,model.X_g1,model.X_g2,model.masks,model.Y_g,model.Y_g2])
            
            #saving image tile
            #img_save(epoch, imgs[0], name="Y_r")
            img_save(epoch, imgs[1], name="x_g1")
            img_save(epoch, imgs[2], name="x_g2")
            #img_save(epoch, imgs[3], name="mask")
            img_save(epoch, imgs[4], name="Y_g")
            img_save(epoch, imgs[5], name="Y_g2")
            
            # NOTE(review): "epoch" advances every 5 steps and `step` resets,
            # so it counts 5-step intervals rather than full data passes.
            step = 0
            epoch += 1
            #time.sleep(1)

        step += 1
        global_step += 1
    
    # Plot the per-iteration loss curves recorded above.
    fig = plt.figure()
    ax =fig.add_subplot(1,1,1)
    ax.plot(np.arange(0,i,1),train_loss_D,'r--',label="D_loss")
    ax.plot(np.arange(0,i,1),train_loss_G,'b--',label = "G_loss")
    ax.set_title("loss view")
    ax.set_xlabel('iteration')
    ax.set_ylabel('loss')
    plt.show()
    
    # Shut down the queue-runner threads cleanly.
    coord.request_stop()
    coord.join(threads)
    sess.close()
    print("Done.")

if __name__=='__main__':

    original_path = './'
    # GPU session config: log device placement, grow GPU memory on demand.
    run_config = tf.ConfigProto(log_device_placement=True)
    run_config.gpu_options.allow_growth = True
    # BUG FIX: run_config was built and configured but never passed to the
    # session, so allow_growth/log_device_placement had no effect.
    with tf.Session(config=run_config) as sess:
        model = T_train(original_path)

        # Namespace all outputs under the active measurement model.
        images_path = os.path.join(images_path, measurement)
        graph_path = os.path.join(graph_path, measurement)
        checkpoints_path = os.path.join(checkpoints_path, measurement)

        # Create graph, images, and checkpoints folders if they don't exist.
        for _out_dir in (checkpoints_path, graph_path, images_path):
            if not os.path.exists(_out_dir):
                os.makedirs(_out_dir)

        print('Start Training...')
        RT(sess, model)
