#! /usr/bin/python
# -*- coding: utf8 -*-

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
import tensorflow.contrib.slim as slim

from config import config, log_config
# from tensorflow.python.ops import variable_scope as vs
# from tensorflow.python.ops import math_ops, init_ops, array_ops, nn
# from tensorflow.python.util import nest
# from tensorflow.contrib.rnn.python.ops import core_rnn_cell

# https://github.com/david-gpu/srez/blob/master/srez_model.py

def conv(x, num_out_layers, kernel_size, stride, activation_fn=tf.nn.elu):
    """Convolution with explicit symmetric padding, then a VALID slim conv.

    Reproduces 'SAME'-like geometry for odd kernel sizes while keeping the
    padding values under our control (zero padding via tf.pad).

    Args:
        x: 4-D NHWC input tensor.
        num_out_layers: number of output channels.
        kernel_size: square kernel side length (python int).
        stride: convolution stride.
        activation_fn: activation applied by slim.conv2d (default: ELU).

    Returns:
        The convolved tensor.
    """
    # Fix: was np.floor((kernel_size - 1) / 2).astype(np.int32), but numpy is
    # never imported in this file; plain floor division is equivalent for
    # non-negative integer kernel sizes.
    p = (kernel_size - 1) // 2
    p_x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]])
    return slim.conv2d(p_x, num_out_layers, kernel_size, stride, 'VALID', activation_fn=activation_fn)

def upsample_nn(x, ratio):
    """Nearest-neighbor upsample of an NHWC tensor by an integer ratio.

    Fix: dropped a stray `self` parameter — this is a module-level function
    (likely copied out of a class), and the call site `upsample_nn(x, scale)`
    in upconvup() passes only two arguments, so the original raised TypeError.

    Args:
        x: 4-D NHWC input tensor.
        ratio: integer upsampling factor for both spatial dimensions.

    Returns:
        The upsampled tensor (dynamic height/width scaled by `ratio`).
    """
    s = tf.shape(x)
    h = s[1]
    w = s[2]
    return tf.image.resize_nearest_neighbor(x, [h * ratio, w * ratio])

def upconvup(x, num_out_layers, kernel_size, scale):
    """Upsample `x` by `scale` (nearest neighbor), then apply a convolution."""
    upsampled = upsample_nn(x, scale)
    return conv(upsampled, num_out_layers, kernel_size, 1)

def deconv(x, num_out_layers, kernel_size, scale):
    """Transposed-convolution upsampling with border cropping.

    Fixes: dropped a stray `self` parameter (module-level function, likely
    copied from a class method), and renamed the local that shadowed the
    module-level conv() function.

    Args:
        x: 4-D NHWC input tensor.
        num_out_layers: number of output channels.
        kernel_size: square kernel side length.
        scale: stride of the transposed convolution (upsampling factor).

    Returns:
        The upsampled tensor with the padding-induced border cropped off.
    """
    p_x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
    out = slim.conv2d_transpose(p_x, num_out_layers, kernel_size, scale, 'SAME')
    # Crop the border created by the 1-pixel input padding after upsampling.
    # NOTE(review): the asymmetric crop [3:-1, 3:-1] presumes scale == 2 —
    # confirm against callers before using other scales.
    return out[:, 3:-1, 3:-1, :]

def maxpool(x, kernel_size):
    """Max pooling with explicit symmetric zero padding.

    Args:
        x: 4-D NHWC input tensor.
        kernel_size: square pooling window side length (python int).

    Returns:
        The pooled tensor (slim.max_pool2d uses its default stride of 2).
    """
    # Fix: was np.floor((kernel_size - 1) / 2).astype(np.int32), but numpy is
    # never imported in this file; floor division is equivalent for
    # non-negative integer kernel sizes.
    p = (kernel_size - 1) // 2
    p_x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]])
    return slim.max_pool2d(p_x, kernel_size)

def resconv(x, num_layers, stride):
    """Bottleneck residual unit: 1x1 -> 3x3(stride) -> 1x1 (4*num_layers), ELU output.

    Fix: the projection test previously compared the dynamic `tf.shape(x)[3]`
    (a Tensor) against a python int with `!=`; in TF1 that falls back to object
    identity and is always True, so a 1x1 projection was built on every call
    and the identity shortcut was unreachable. The test now uses the static
    channel count and compares against 4*num_layers — the channel count of
    conv3, which the shortcut must match for the addition to be valid.

    Args:
        x: 4-D NHWC input tensor with a statically known channel dimension.
        num_layers: bottleneck width; the block outputs 4*num_layers channels.
        stride: stride of the middle 3x3 convolution.

    Returns:
        ELU(conv3 + shortcut).
    """
    in_channels = x.get_shape().as_list()[3]
    # Project the shortcut whenever the identity path cannot match the block
    # output: differing channel count, or spatial size halved by stride 2.
    do_proj = in_channels != 4 * num_layers or stride == 2
    conv1 = conv(x,         num_layers, 1, 1)
    conv2 = conv(conv1,     num_layers, 3, stride)
    conv3 = conv(conv2, 4 * num_layers, 1, 1, None)
    if do_proj:
        shortcut = conv(x, 4 * num_layers, 1, stride, None)
    else:
        shortcut = x
    return tf.nn.elu(conv3 + shortcut)

def resblock(x, num_layers, num_blocks):
    """Stack `num_blocks` resconv units: all stride 1 except a final stride 2."""
    out = x
    for _ in range(num_blocks - 1):
        out = resconv(out, num_layers, 1)
    return resconv(out, num_layers, 2)

def get_disp(x):
    """Disparity head: 2-channel 3x3 sigmoid conv scaled into [0, 0.3].

    Fix: this module-level function carried a stray `self` parameter and
    called `self.conv`, which does not exist outside a class (the code was
    evidently copied from a class method); it now calls the module-level
    conv() directly.

    Args:
        x: 4-D NHWC feature tensor.

    Returns:
        A 2-channel disparity tensor in [0, 0.3].
    """
    disp = 0.3 * conv(x, 2, 3, 1, tf.nn.sigmoid)
    return disp

def DEGAN_g(t_image, is_train=False, reuse=False):
    """Generator network.

    A ResNet-50-style bottleneck encoder (7x7 stride-2 stem, then four
    residual stages with output widths 256/512/1024/2048) followed by an
    SRGAN-style decoder: two 2x sub-pixel (pixel shuffle) upsampling stages
    and a 3-channel tanh output conv.

    Args:
        t_image: input image batch (NHWC tensor).
        is_train: forwarded to every BatchNormLayer.
        reuse: whether to reuse the variable scope.

    Returns:
        The final TensorLayer layer (name 'out').

    NOTE(review): as written this function cannot build a graph — see the
    inline NOTE(review) comments below (Layer `+` misuse, duplicated BN
    names, undefined `temp`). It appears to be work in progress.
    """

    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None # tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    # NOTE(review): the scope is named "SRGAN_g" although the function is
    # DEGAN_g — presumably inherited from the SRGAN reference code; renaming
    # would invalidate existing checkpoints, so confirm before changing.
    with tf.variable_scope("SRGAN_g", reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        n = InputLayer(t_image, name='in')

        ##################################### encoder begin ######################################
        n = Conv2d(n, 64, (7, 7), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, name='64Conv1H/2')
        # NOTE(review): skip1..skip5 are saved at each resolution but never
        # consumed anywhere below — dead stores, or decoder wiring is missing.
        skip1 = n
        n = Conv2d(n, 64, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, name='64Poll1H/4')

        skip2 = n

        # todo Res 1
        ## todo L = 1 S = 1
        nn = Conv2d(n, 64, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c1/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b1/1L')
        nn = Conv2d(nn, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c2/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b2/1L')
        nn = Conv2d(nn, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c3/1L')
        sc = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/csc/1L')
        # NOTE(review): `nn` and `sc` are TensorLayer Layer objects; Layer does
        # not define `+` in TL 1.x, and even if the tensors were added,
        # tf.nn.elu returns a plain Tensor while the next Conv2d expects a
        # Layer. Every residual merge below repeats this pattern; it likely
        # needs ElementwiseLayer([nn, sc], tf.add, name=...) followed by ELU.
        n = tf.nn.elu(nn+sc)

        ## todo L = 2 S = 1
        nn = Conv2d(n, 64, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c1/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b1/2L')
        nn = Conv2d(nn, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c2/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b2/2L')
        nn = Conv2d(nn, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c3/2L')
        n = tf.nn.elu(nn+n)

        ## todo L = 3 S = 2
        nn = Conv2d(n, 64, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c1/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b1/3L')
        nn = Conv2d(nn, 64, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c2/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res1/b2/3L')
        nn = Conv2d(nn, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/c3/3L')
        # NOTE(review): this shortcut is stride 1 while the main branch is
        # stride 2 — the spatial sizes cannot match for the addition; the
        # shortcut conv presumably also needs stride (2, 2). Same issue in the
        # stride-2 blocks of res2/res3/res4 below.
        sc = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res1/csc/3L')
        n = tf.nn.elu(nn+sc)

        skip3 = n

        # todo Res 2
        ## todo L = 1 S = 1
        nn = Conv2d(n, 128, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c1/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b1/1L')
        nn = Conv2d(nn, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c2/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b2/1L')
        nn = Conv2d(nn, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c3/1L')
        sc = Conv2d(n, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/csc/1L')
        n = tf.nn.elu(nn + sc)

        ## todo L = 2 S = 1
        nn = Conv2d(n, 128, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c1/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b1/2L')
        nn = Conv2d(nn, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c2/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b2/2L')
        nn = Conv2d(nn, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c3/2L')
        n = tf.nn.elu(nn + n)

        ## todo L = 3 S = 1
        nn = Conv2d(n, 128, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c1/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b1/3L')
        nn = Conv2d(nn, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c2/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b2/3L')
        nn = Conv2d(nn, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c3/3L')
        n = tf.nn.elu(nn + n)

        ## todo L = 4 S = 2
        nn = Conv2d(n, 128, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c1/4L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b1/4L')
        nn = Conv2d(nn, 128, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c2/4L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res2/b2/4L')
        nn = Conv2d(nn, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/c3/4L')
        sc = Conv2d(n, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res2/csc/4L')
        n = tf.nn.elu(nn + sc)

        skip4 = n

        # todo Res 3
        ## todo L = 1 S = 1
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/1L')
        nn = Conv2d(nn, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c2/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/1L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c3/1L')
        sc = Conv2d(n, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/csc/1L')
        n = tf.nn.elu(nn + sc)

        ## todo L = 2 S = 1
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/2L')
        nn = Conv2d(nn, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c2/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/2L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c3/2L')
        n = tf.nn.elu(nn + n)

        ## todo L = 3 S = 1
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/3L')
        nn = Conv2d(nn, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c2/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/3L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c3/3L')
        n = tf.nn.elu(nn + n)

        ## todo L = 4 S = 1
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/4L')
        # NOTE(review): BN names 'res3/b1/3L' and 'res3/b2/3L' are reused here
        # (and again in the 5L block) — with set_name_reuse(False) TL will
        # raise on the duplicate names; they were presumably meant to be
        # 'res3/b1/4L' / 'res3/b2/4L' etc.
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/3L')
        nn = Conv2d(nn, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c2/4L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/3L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c3/4L')
        n = tf.nn.elu(nn + n)

        ## todo L = 5 S = 1
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/5L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/3L')
        nn = Conv2d(nn, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c2/5L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/3L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init,name='res3/c3/5L')
        n = tf.nn.elu(nn + n)

        ## todo L = 6 S = 2
        nn = Conv2d(n, 256, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c1/6L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b1/6L')
        nn = Conv2d(nn, 256, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c2/6L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res3/b2/6L')
        nn = Conv2d(nn, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/c3/6L')
        sc = Conv2d(n, 1024, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res3/csc/6L')
        n = tf.nn.elu(nn + sc)

        skip5 = n

        # todo Res 4
        ## todo L = 1 S = 1
        nn = Conv2d(n, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c1/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b1/1L')
        nn = Conv2d(nn, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c2/1L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b2/1L')
        nn = Conv2d(nn, 2048, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c3/1L')
        sc = Conv2d(n, 2048, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/csc/1L')
        n = tf.nn.elu(nn+sc)

        ## todo L = 2 S = 1
        nn = Conv2d(n, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c1/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b1/2L')
        nn = Conv2d(nn, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c2/2L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b2/2L')
        nn = Conv2d(nn, 2048, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c3/2L')
        n = tf.nn.elu(nn+n)

        ## todo L = 3 S = 2
        nn = Conv2d(n, 512, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c1/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b1/3L')
        nn = Conv2d(nn, 512, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c2/3L')
        nn = BatchNormLayer(nn, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='res4/b2/3L')
        nn = Conv2d(nn, 2048, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/c3/3L')
        sc = Conv2d(n, 2048, (1, 1), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='res4/csc/3L')
        n = tf.nn.elu(nn+sc)

        ############################### encoder end ##################################
        ############################### decoder begin ##################################


        n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='n64s1/c/m')
        n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='n64s1/b/m')
        # NOTE(review): `temp` is never defined in this function — this line
        # raises NameError at graph build. In the SRGAN reference this is the
        # long skip connection (`temp = n` saved right after the input stem);
        # the equivalent save is missing here.
        n = ElementwiseLayer([n, temp], tf.add, 'add3')

        # Decoder: two 2x sub-pixel (pixel shuffle) upsampling stages (4x total).
        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/1')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/1')

        n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/2')
        n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')

        # Final 3-channel tanh conv producing the output image.
        n = Conv2d(n, 3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init, name='out')
        return n



def DEGAN_d(input_images, is_train=True, reuse=False):
    """Discriminator network.

    Six stride-2 4x4 convolutions (channel widths 64 up to 2048), each
    followed by batch norm + leaky ReLU, then two 1x1 channel reductions,
    one residual branch merged back with an addition, and a dense head
    emitting a single logit per image.

    Args:
        input_images: input image batch (NHWC tensor).
        is_train: forwarded to every BatchNormLayer.
        reuse: whether to reuse the variable scope.

    Returns:
        net_ho: the final DenseLayer; its .outputs holds sigmoid probabilities.
        logits: the raw pre-sigmoid output tensor.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None # tf.constant_initializer(value=0.0)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 64
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    with tf.variable_scope("DEGAN_d", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        net = InputLayer(input_images, name='input/images')
        # Stem conv carries its own activation (no BN on the first stage).
        net = Conv2d(net, df_dim, (4, 4), (2, 2), act=lrelu,
                     padding='SAME', W_init=w_init, name='h0/c')

        # Five identical stride-2 conv + BN stages, doubling channels each time.
        for width, tag in [(df_dim * 2, 'h1'), (df_dim * 4, 'h2'),
                           (df_dim * 8, 'h3'), (df_dim * 16, 'h4'),
                           (df_dim * 32, 'h5')]:
            net = Conv2d(net, width, (4, 4), (2, 2), act=None,
                         padding='SAME', W_init=w_init, b_init=b_init, name=tag + '/c')
            net = BatchNormLayer(net, act=lrelu, is_train=is_train,
                                 gamma_init=gamma_init, name=tag + '/bn')

        # Two 1x1 reductions; the last BN is linear because the residual sum
        # below is activated after the addition.
        net = Conv2d(net, df_dim * 16, (1, 1), (1, 1), act=None,
                     padding='SAME', W_init=w_init, b_init=b_init, name='h6/c')
        net = BatchNormLayer(net, act=lrelu, is_train=is_train,
                             gamma_init=gamma_init, name='h6/bn')
        net = Conv2d(net, df_dim * 8, (1, 1), (1, 1), act=None,
                     padding='SAME', W_init=w_init, b_init=b_init, name='h7/c')
        trunk = BatchNormLayer(net, is_train=is_train,
                               gamma_init=gamma_init, name='h7/bn')

        # Residual branch: 1x1 bottleneck, two 3x3 convs back to df_dim*8.
        res = Conv2d(trunk, df_dim * 2, (1, 1), (1, 1), act=None,
                     padding='SAME', W_init=w_init, b_init=b_init, name='res/c')
        res = BatchNormLayer(res, act=lrelu, is_train=is_train,
                             gamma_init=gamma_init, name='res/bn')
        res = Conv2d(res, df_dim * 2, (3, 3), (1, 1), act=None,
                     padding='SAME', W_init=w_init, b_init=b_init, name='res/c2')
        res = BatchNormLayer(res, act=lrelu, is_train=is_train,
                             gamma_init=gamma_init, name='res/bn2')
        res = Conv2d(res, df_dim * 8, (3, 3), (1, 1), act=None,
                     padding='SAME', W_init=w_init, b_init=b_init, name='res/c3')
        res = BatchNormLayer(res, is_train=is_train,
                             gamma_init=gamma_init, name='res/bn3')

        merged = ElementwiseLayer(layer=[trunk, res],
                                  combine_fn=tf.add, name='res/add')
        merged.outputs = tl.act.lrelu(merged.outputs, 0.2)

        # Dense head: one logit; sigmoid applied to the layer outputs while
        # the raw logits are returned separately for the loss.
        net_ho = FlattenLayer(merged, name='ho/flatten')
        net_ho = DenseLayer(net_ho, n_units=1, act=tf.identity,
                            W_init=w_init, name='ho/dense')
        logits = net_ho.outputs
        net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)

    return net_ho, logits