# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 18-12-16 下午9:48
# @file  : Model.py

from __future__ import print_function

import tensorflow as tf

from utils.operations import deconv2d, conv2d, BatchNorm


class FGan(object):
    """Text-conditional GAN (GAN-INT-CLS style) for generating flower images.

    The generator maps (noise, caption embedding) -> fake image.  The
    discriminator scores (image, caption embedding) pairs; three pairings
    are scored per step: (real image, matching text), (wrong image,
    matching text) and (generated image, matching text).
    """

    def __init__(self, config):
        # `config` is expected to expose `size.image_size` and a
        # `dimensions` namespace with z_dim, t_dim, gf_dim, df_dim and
        # caption_vec_length -- TODO confirm against the config module.
        self.config = config
        self.img_size = self.config.size.image_size
        self.real_image = self.noise = None
        self.fake_image = None

    def build_model(self, training=True):
        """Assemble the full training graph.

        Args:
            training: forwarded to every batch-normalization layer so the
                same graph-building code serves both train and eval.

        Returns:
            A 5-tuple of dicts: (input_tensors, variables, losses,
            outputs, checks).
        """
        self.init_placeholder()

        # Generate a fake image conditioned on the caption embedding.
        self.fake_image = self.generator(self.noise, self.text_embedding, training=training)
        tf.summary.image("real_image", self.real_image, max_outputs=3)

        # Score the three (image, text) pairings.  The discriminator
        # weights are shared across the three calls via reuse=True.
        disc_real_image, disc_real_image_logits = self.discriminator(
            self.real_image, self.text_embedding, training=training)
        disc_wrong_image, disc_wrong_image_logits = self.discriminator(
            self.wrong_image, self.text_embedding, reuse=True, training=training)
        disc_fake_image, disc_fake_image_logits = self.discriminator(
            self.fake_image, self.text_embedding, reuse=True, training=training)

        # Generator loss: make the discriminator call generated images real.
        g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_fake_image_logits, labels=tf.ones_like(disc_fake_image)))
        # Discriminator loss: real pairs -> 1 (with one-sided label
        # smoothing at 0.9), mismatched and generated pairs -> 0.
        d_loss1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_real_image_logits, labels=tf.ones_like(disc_real_image) * 0.9))
        d_loss2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_wrong_image_logits, labels=tf.zeros_like(disc_wrong_image)))
        d_loss3 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_fake_image_logits, labels=tf.zeros_like(disc_fake_image)))
        d_loss = d_loss1 + d_loss2 + d_loss3

        # Split trainable variables by the enclosing variable_scope name so
        # the two optimizers update disjoint parameter sets.
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if var.name.startswith("discriminator")]
        g_vars = [var for var in t_vars if var.name.startswith("generator")]

        for v in t_vars:
            print(v.name, v.shape)

        # Guard against variables created outside the two known scopes,
        # which would silently never be trained.
        assert (len(d_vars) + len(g_vars)) == len(t_vars), \
            "d_vars({}) add g_vars({}) equals to t_vars({})".format(
                len(d_vars), len(g_vars), len(t_vars))

        # NOTE(review): tf.layers.batch_normalization accumulates its
        # moving-average updates in tf.GraphKeys.UPDATE_OPS; the training
        # loop (not visible here) must run them via control_dependencies --
        # verify in the trainer.
        input_tensors = {
            "real_image": self.real_image,
            "wrong_image": self.wrong_image,
            "noise": self.noise,
            "text_embedding": self.text_embedding
        }

        variables = {
            "d_vars": d_vars,
            "g_vars": g_vars
        }

        losses = {
            "d_loss": d_loss,
            "g_loss": g_loss
        }

        outputs = {
            "fake_image": self.fake_image
        }

        checks = {
            "d_loss1": d_loss1,
            "d_loss2": d_loss2,
            "d_loss3": d_loss3,
            "disc_real_image_logits": disc_real_image_logits,
            "disc_fake_image_logits": disc_fake_image_logits
        }

        return input_tensors, variables, losses, outputs, checks

    def init_placeholder(self):
        """Create the four feed placeholders used by `build_model`."""
        # Images are float tensors in NHWC layout with 3 channels.
        self.real_image = tf.placeholder(tf.float32, [None, self.img_size, self.img_size, 3], name="real_image")
        self.wrong_image = tf.placeholder(tf.float32, [None, self.img_size, self.img_size, 3], name="wrong_image")
        # Named for consistency with the image placeholders above.
        self.noise = tf.placeholder(tf.float32, [None, self.config.dimensions.z_dim], name="noise")
        self.text_embedding = tf.placeholder(tf.float32, [None, self.config.dimensions.caption_vec_length],
                                             name="text_embedding")

    def generator(self, noise, text_embedding, training=True):
        """Build the generator: (noise, caption) -> fake image in [-1, 1].

        Every weighted op is named with a ``g_`` prefix and lives under the
        ``generator`` variable scope so `build_model` can collect its
        trainable variables by scope-name prefix.

        Args:
            noise: float tensor [batch, z_dim].
            text_embedding: float tensor [batch, caption_vec_length].
            training: forwarded to batch-normalization layers.

        Returns:
            Float tensor [batch, img_size, img_size, 3], tanh-squashed.
        """
        with tf.variable_scope("generator"):
            gf_dim = self.config.dimensions.gf_dim
            # Spatial sizes of the successive upsampling stages.
            s, s2, s4, s8, s16 = self.img_size, int(self.img_size / 2), int(self.img_size / 4), int(self.img_size / 8),\
                                 int(self.img_size / 16)
            # Compress the caption embedding to t_dim before conditioning.
            reduced_text_embedding = tf.nn.leaky_relu(tf.layers.dense(text_embedding, self.config.dimensions.t_dim,
                                                     kernel_initializer=tf.glorot_normal_initializer(),
                                                     name="g_reduce_text_embedding"))
            noise = tf.concat([noise, reduced_text_embedding], -1)
            # Project the conditioned noise to the smallest feature map.
            densed_noise = tf.layers.dense(noise, gf_dim * 8 * s16 * s16, name="g_concat",
                                           kernel_initializer=tf.glorot_normal_initializer())
            # Dynamic batch size for the deconv output shapes.
            batch_size_tensor = tf.shape(noise)[0]

            h0 = tf.reshape(densed_noise, [-1, s16, s16, gf_dim * 8])
            h0 = tf.nn.relu(tf.layers.batch_normalization(h0, name="g_bn0", training=training))

            # Four transposed convolutions double the resolution each time:
            # s16 -> s8 -> s4 -> s2 -> s, halving the channel count.
            h1 = deconv2d(h0, [batch_size_tensor, s8, s8, gf_dim * 4], name="g_h1")
            h1 = tf.nn.relu(tf.layers.batch_normalization(h1, name="g_bn1", training=training))

            h2 = deconv2d(h1, [batch_size_tensor, s4, s4, gf_dim * 2], name="g_h2")
            h2 = tf.nn.relu(tf.layers.batch_normalization(h2, name="g_bn2", training=training))

            h3 = deconv2d(h2, [batch_size_tensor, s2, s2, gf_dim * 1], name="g_h3")
            h3 = tf.nn.relu(tf.layers.batch_normalization(h3, name="g_bn3", training=training))

            h4 = deconv2d(h3, [batch_size_tensor, s, s, 3], name="g_h4")
            # tanh keeps outputs in [-1, 1] (empirically standard for GANs).
            return tf.tanh(h4)

    def discriminator(self, image, text_embedding, reuse=False, training=True):
        """Score an (image, caption) pair.

        Args:
            image: float tensor [batch, img_size, img_size, 3].
            text_embedding: float tensor [batch, caption_vec_length].
            reuse: share weights with a previously built discriminator.
            training: forwarded to batch-normalization layers.

        Returns:
            (probability, logits) -- both [batch, 1]; `probability` is the
            sigmoid of `logits`.
        """
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            # Four strided convolutions; trailing comments give the spatial
            # size assuming a 64x64 input -- TODO confirm image_size.
            h0 = tf.nn.leaky_relu(conv2d(image, self.config.dimensions.df_dim, name="d_h0_conv"))   # 32
            h1 = tf.nn.leaky_relu(tf.layers.batch_normalization(conv2d(h0, self.config.dimensions.df_dim * 2, name="d_h1_conv"), name="d_bn1", training=training))  # 16
            h2 = tf.nn.leaky_relu(tf.layers.batch_normalization(conv2d(h1, self.config.dimensions.df_dim * 4, name="d_h2_conv"), name="d_bn2", training=training))  # 8
            h3 = tf.nn.leaky_relu(tf.layers.batch_normalization(conv2d(h2, self.config.dimensions.df_dim * 8, name="d_h3_conv"), name="d_bn3", training=training))  # 4

            # Compress the caption to t_dim, then broadcast it spatially to
            # a 4x4 grid so it can be concatenated onto the conv features.
            reduced_text_embedding = tf.nn.leaky_relu(tf.layers.dense(text_embedding, self.config.dimensions.t_dim,
                                                                      kernel_initializer=tf.glorot_normal_initializer(),
                                                                      name="d_reduce_text_embedding"))
            reduced_text_embedding = tf.expand_dims(reduced_text_embedding, 1)
            reduced_text_embedding = tf.expand_dims(reduced_text_embedding, 1)
            reduced_text_embedding = tf.tile(reduced_text_embedding, [1, 4, 4, 1])
            h3_concat = tf.concat([h3, reduced_text_embedding], axis=-1)
            # 1x1 convolution mixes image features with the text condition.
            h3_new = tf.nn.leaky_relu(tf.layers.batch_normalization(conv2d(h3_concat,
                                                                           self.config.dimensions.df_dim, 1, 1, 1, 1,
                                                                           name="d_h3_new"),
                                                                    name="d_bn4",
                                                                    training=training))

            # Flatten (static spatial/channel dims, dynamic batch) and
            # project to a single real/fake logit.
            fc_dim = h3_new.shape[1].value * h3_new.shape[2].value * h3_new.shape[3].value
            h3_new = tf.reshape(h3_new, shape=[tf.shape(image)[0], fc_dim])
            h4 = tf.layers.dense(h3_new, 1, name="d_h4", kernel_initializer=tf.glorot_normal_initializer())

            return tf.nn.sigmoid(h4), h4

