"""Generative adversarial network."""

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

from tf import contrib
from tf.contrib import layers

class Gan(object):
    """Vanilla generative adversarial network (TF1 graph mode).

    Builds, at construction time, the full computation graph for a GAN with
    a 1-hidden-layer MLP generator and discriminator, the standard
    non-saturating sigmoid-cross-entropy losses, and one Adam optimizer per
    sub-network, then opens an interactive session with variables
    initialized.

    Attributes:
        x_placeholder: Real images, float32 of shape (None, ndims).
        z_placeholder: Latent noise, float32 of shape (None, nlatent).
        learning_rate_placeholder: Scalar float32 learning rate, fed per step.
        x_hat: Generated (fake) images G(z), shape (None, ndims), in [0, 1].
        d_loss / g_loss: Scalar discriminator / generator losses.
        D_train_op / G_train_op: Adam update ops restricted to each
            sub-network's own trainable variables.
        session: The tf.InteractiveSession used for all evaluation.
    """

    def __init__(self, ndims=784, nlatent=2):
        """Initializes a GAN and builds the training graph.

        Args:
            ndims (int): Number of dimensions in the feature (image) space.
            nlatent (int): Number of dimensions in the latent space.
        """
        self._ndims = ndims
        self._nlatent = nlatent

        # Learning rate, fed at each training step.
        self.learning_rate_placeholder = tf.placeholder(tf.float32, [])

        # Real input images, flattened to (batch, ndims).
        self.x_placeholder = tf.placeholder(tf.float32, [None, ndims])

        # Input noise vectors, (batch, nlatent).
        self.z_placeholder = tf.placeholder(tf.float32, [None, nlatent])

        # Build the graph: generate fakes, then score fakes and reals with
        # the SAME discriminator weights (reuse=True on the second call so
        # no new variables are created).
        self.x_hat = self._generator(self.z_placeholder, reuse=tf.AUTO_REUSE)
        y_hat = self._discriminator(self.x_hat)
        y = self._discriminator(self.x_placeholder, reuse=True)

        # Discriminator loss: classify reals as 1, fakes as 0.
        self.d_loss = self._discriminator_loss(y, y_hat)

        # Generator loss: fool the discriminator into outputting 1 on fakes.
        self.g_loss = self._generator_loss(y_hat)

        # Separate optimizers with var_list restricted by scope name, so a
        # discriminator step never touches generator weights and vice versa.
        D_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "discriminator")
        self.D_train_op = tf.train.AdamOptimizer(self.learning_rate_placeholder).minimize(
                                self.d_loss, var_list=D_train_vars)
        G_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator")
        self.G_train_op = tf.train.AdamOptimizer(self.learning_rate_placeholder).minimize(
                                self.g_loss, var_list=G_train_vars)

        # Create session and initialize all variables.
        self.session = tf.InteractiveSession()
        self.session.run(tf.global_variables_initializer())

    def _discriminator(self, x, reuse=False):
        """Discriminator block of the network.

        A 2-layer MLP: 128-unit hidden layer with leaky ReLU, then a single
        linear output unit. No activation on the output: the sigmoid is
        applied implicitly by sigmoid_cross_entropy_with_logits in the
        losses, which is numerically safer than a separate sigmoid.

        Args:
            x (tf.Tensor): Input images of shape (None, ndims).
            reuse (bool): Reuse the variables already created under the
                "discriminator" scope instead of creating new ones.
        Returns:
            y (tf.Tensor): Real-vs-fake logits D(x), shape (None, 1).
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            net1 = tf.layers.dense(x, 128, activation=None)
            # Leaky ReLU with slope 0.01 on the negative side.
            net1 = tf.maximum(net1, 0.01*net1)
            y = tf.layers.dense(net1, 1, activation=None)
            return y

    def _discriminator_loss(self, y, y_hat):
        """Loss for the discriminator.

        Args:
            y (tf.Tensor): Discriminator logits for real images, (None, 1).
            y_hat (tf.Tensor): Discriminator logits for fake images, (None, 1).
        Returns:
            l (tf.Tensor): Scalar average batch loss for the discriminator.
        """
        # Real images should be classified as "real" (label 1).
        D_loss_real = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(y), logits=y)
        # Fake images should be classified as "fake" (label 0).
        D_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(y_hat), logits=y_hat)
        # Sum the per-example losses, then average over the batch.
        l = tf.reduce_mean(D_loss_real + D_loss_fake)
        return l

    def _generator(self, z, reuse=False):
        """From sampled noise z, generate an image.

        A 2-layer MLP mirroring the discriminator: 128-unit hidden layer
        with leaky ReLU, then a linear layer to ndims logits squashed by a
        sigmoid so pixel values land in [0, 1].

        Args:
            z (tf.Tensor): Latent noise of shape (None, nlatent).
            reuse (bool): Reuse the variables already created under the
                "generator" scope instead of creating new ones.
        Returns:
            x_hat (tf.Tensor): Fake image G(z), shape (None, ndims).
        """
        with tf.variable_scope("generator", reuse=reuse):
            net1 = tf.layers.dense(z, 128, activation=None)
            # Leaky ReLU with slope 0.01 on the negative side.
            net1 = tf.maximum(net1, 0.01*net1)
            logits = tf.layers.dense(net1, self._ndims, activation=None)
            x_hat = tf.nn.sigmoid(logits)
            return x_hat

    def _generator_loss(self, y_hat):
        """Loss for the generator.

        Args:
            y_hat (tf.Tensor): Discriminator logits for fake images, (None, 1).
        Returns:
            l (tf.Tensor): Scalar average batch loss for the generator.
        """
        # The generator wants its fakes scored as "real" (label 1).
        G_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(y_hat), logits=y_hat)
        # Average over the batch.
        l = tf.reduce_mean(G_loss)
        return l

    def generate_samples(self, z_np):
        """Generates images from the provided latent codes.

        Args:
            z_np (numpy.ndarray): Latent codes of shape (batch_size, _nlatent).

        Returns:
            out (numpy.ndarray): Sampled images of shape (batch_size, _ndims).
        """
        out = self.x_hat.eval(session=self.session, feed_dict={self.z_placeholder: z_np})
        return out
