"""
author : DengXiuqi
date : 2018.10
email : dengxiuqi@163.com
"""

import tensorflow as tf
import numpy as np
import scipy.io
from config import vgg_path


def SRGAN_g(t_image, is_train=False, reuse=False):
    """SRGAN generator network (TF 1.x graph-mode).

    Builds a stride-1 residual CNN: a 3-conv head widening channels
    (32 -> 64 -> 128), five 3x3 residual blocks at 128 channels, a
    skip connection back to the head output, then a tapering tail
    (128 -> 64 -> 32) and a final 1x1 conv to 3 output channels.
    Spatial size is preserved throughout (all convs are SAME, stride 1).

    Args:
        t_image: input image tensor, NHWC layout
            (assumes 3-channel input -- TODO confirm against caller).
        is_train: forwarded as ``training=`` to every batch-norm layer so
            that moving statistics are updated during training and the
            frozen statistics are used at inference.
        reuse: reuse variables in the ``SRGAN_g`` variable scope.

    Returns:
        A tensor with 3 channels and the same spatial size as ``t_image``.

    NOTE(review): when is_train=True the BN update ops live in
    tf.GraphKeys.UPDATE_OPS and must be run alongside the train op
    (standard TF 1.x batch-norm contract) -- verify the training loop
    does this.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = tf.constant_initializer(value=0.0)
    # Layer names below are kept identical to the original graph so that
    # existing checkpoints remain loadable.
    with tf.variable_scope("SRGAN_g", reuse=reuse):
        filter_num = 32
        n = t_image

        # Head: widen channels 32 -> 64 -> 128 with conv + BN + ReLU.
        n = tf.layers.conv2d(n, filter_num, (3, 3), (1, 1), padding='SAME',
                             kernel_initializer=w_init, bias_initializer=b_init, name='s1/c1')
        n = tf.layers.batch_normalization(n, training=is_train, name='s1/b/c1')
        n = tf.nn.relu(n)
        n = tf.layers.conv2d(n, filter_num * 2, (3, 3), (1, 1), padding='SAME',
                             kernel_initializer=w_init, bias_initializer=b_init, name='s1/c2')
        n = tf.layers.batch_normalization(n, training=is_train, name='s1/b/c2')
        n = tf.nn.relu(n)
        n = tf.layers.conv2d(n, filter_num * 4, (3, 3), (1, 1), activation=tf.nn.relu,
                             padding='SAME', kernel_initializer=w_init,
                             bias_initializer=b_init, name='s1/c3')
        temp = n  # saved for the long skip connection after the residual blocks

        # Five residual blocks of 3x3 convolutions at 128 channels:
        # conv -> BN -> ReLU -> conv -> BN -> identity add.
        for i in range(5):
            nn = tf.layers.conv2d(n, filter_num * 4, (3, 3), (1, 1), activation=None,
                                  padding='SAME', kernel_initializer=w_init,
                                  bias_initializer=b_init, name='r1/c1/%s' % i)
            nn = tf.layers.batch_normalization(nn, training=is_train, name='r1/b1/%s' % i)
            nn = tf.nn.relu(nn)
            nn = tf.layers.conv2d(nn, filter_num * 4, (3, 3), (1, 1), activation=None,
                                  padding='SAME', kernel_initializer=w_init,
                                  bias_initializer=b_init, name='r1/c2/%s' % i)
            nn = tf.layers.batch_normalization(nn, training=is_train, name='r1/b2/%s' % i)
            nn = tf.add(n, nn, name='r1_add/%s' % i)
            n = nn

        # Post-residual conv + BN, then the long skip from the head output.
        n = tf.layers.conv2d(n, filter_num * 4, (3, 3), (1, 1), activation=None,
                             padding='SAME', kernel_initializer=w_init,
                             bias_initializer=b_init, name='s1/c4')
        n = tf.layers.batch_normalization(n, training=is_train, name='s1/b/c4')
        n = tf.add(n, temp, name='s1/add1')
        n = tf.nn.relu(n)

        # Tail: taper channels back down 128 -> 64 -> 32.
        n = tf.layers.conv2d(n, filter_num * 2, (3, 3), (1, 1), padding='SAME',
                             kernel_initializer=w_init, bias_initializer=b_init, name='s2/c1')
        n = tf.layers.batch_normalization(n, training=is_train, name='s2/b/c1')

        n = tf.layers.conv2d(n, filter_num * 2, (3, 3), (1, 1), activation=None, padding='SAME',
                             kernel_initializer=w_init, name='s3/c1')
        n = tf.layers.batch_normalization(n, training=is_train, name='s3/b/c1')
        n = tf.nn.relu(n)
        n = tf.layers.conv2d(n, filter_num, (3, 3), (1, 1), activation=None, padding='SAME',
                             kernel_initializer=w_init, name='s3/c2')

        # Final 1x1 projection to 3 output channels (no activation -- the
        # caller is responsible for any output squashing/loss scaling).
        n = tf.layers.conv2d(n, 3, (1, 1), (1, 1), name='out')
        return n
