"""
License: Apache-2.0
Author: wentongxin
E-mail: flybywind@foxmail.com
"""

import tensorflow as tf
import logging
from os import path
import traceback
from utils import *
from config import cfg
from functools import partial, reduce
from tensorflow.contrib import layers as L
import app_logger 
epsilon = 1e-9

logger = logging.getLogger("FadeNet") 

def leaky_relu(x):
    xp = tf.maximum(0., x)
    xn = tf.minimum(0., cfg.leak_relu_neg_slop * x)
    return tf.add(xp, xn, name = "LeakRelu")


class FadeNet():
    def __init__(self):
        self.conv_layers_filter = [16, 32, 64, 128, 256, 512]
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.global_step = tf.Variable(0, name = "global_step", trainable=False)
            self.lambda_enc = tf.minimum(tf.to_float(self.global_step) *
                                         cfg.max_lambda_e / cfg.slope_lambda_e, 
                              cfg.max_lambda_e, name = "Lambda_E")
            with tf.variable_scope("Input") as scope:
                self.X = tf.placeholder(tf.float32, (None, cfg.img_size, cfg.img_size, 3), name = "ImgX") 
                self.labels = tf.placeholder(tf.float32, (None, cfg.attr_num), name = "Attr")
                self.mat_labels = tf.placeholder(tf.float32, (None, 2, 2, cfg.attr_num), name = "AttrMat")
                self.is_training = tf.placeholder(tf.bool, name = "is_training_flag")
            self.conv_fn = partial(L.conv2d, 
                                   kernel_size=3, 
                                   stride=2, 
                                   padding='same',
                                   activation_fn = leaky_relu,
                                   normalizer_fn = L.batch_norm,
                                   normalizer_params = {"is_training": self.is_training})
            self.deconv_fn = partial(L.conv2d_transpose, 
                                     kernel_size=3, 
                                     stride=2, 
                                     padding='same',
                                     normalizer_fn = L.batch_norm,
                                     normalizer_params = {"is_training": self.is_training})

                
            self.train_op = []
            self.optimizer = tf.train.AdamOptimizer(cfg.lr, cfg.beta1)
            self.build_graph()
            self.build_loss()
            self._train_summary()
        logger.info('Seting up the FadeNet graph')
    
    def build_graph(self):
        # encoder
        with tf.variable_scope("encoder"):
            encoder = self.X 
            for fn in self.conv_layers_filter:
                encoder = self.conv_fn(encoder, fn,
                                       scope = "Enc_%d" % fn)
                logger.debug("encoder shape: %r" % encoder.shape.as_list())
            logger.debug("final encoder.shape = %r" % encoder.shape.as_list())
            assert(encoder.shape.as_list()[1:] == [2, 2, 512])
            self.encoder = encoder
        
        with tf.variable_scope("decoder"):
            decoder = tf.concat((self.encoder, self.mat_labels), axis=-1, name = "dec_con_y")
            logger.debug("decoder input shape: %r" % decoder.shape.as_list())
            assert(decoder.shape.as_list()[1:] == [2, 2, cfg.attr_num + 512])
            for fn in self.conv_layers_filter[:0:-1]:
                decoder = self.deconv_fn(decoder, fn + cfg.attr_num,
                                         scope = "Dec_%d" % fn)
                logger.debug("decoder shape: %r" % decoder.shape.as_list())
            decoder = self.deconv_fn(decoder, 3,
                                     scope = "Dec_final")
            logger.debug("decoder reconstruction.shape = %r" % decoder.shape.as_list())
            assert(decoder.shape.as_list()[1:] == [cfg.img_size, cfg.img_size, 3])
            # shift to [-1, 1]
            self.decoder = tf.clip_by_value(decoder - 1., -1., 1., name = "ReconImg")
            
        with tf.variable_scope("discriminator"):
            fc = self.conv_fn(self.encoder, 512, scope = "ConvDis")
            fc = tf.squeeze(fc, [1,2])
            fc = L.fully_connected(fc, 512)
            fc = L.dropout(fc, 0.3, is_training=self.is_training)
            self.attr_discr = L.fully_connected(fc, cfg.attr_num, activation_fn=tf.nn.sigmoid)
            logger.debug("discriminator shape = %r" % self.attr_discr.shape.as_list())
         
        with tf.variable_scope("classifier"):
            inpt = self.X 
            for fn in self.conv_layers_filter:
                inpt = self.conv_fn(inpt, fn,
                                   scope = "Cls_%d" % fn)
            inpt_shape = inpt.shape.as_list()
            flat_len = reduce(lambda x,y: x*y, inpt_shape[1:], 1)
            logger.debug("shape of final conv layer of cliassifier: %r" % inpt_shape)
            inpt = tf.reshape(inpt, [-1, flat_len])
            fc = L.fully_connected(inpt, 512)
            fc = L.dropout(fc, 0.2, is_training=self.is_training)
            self.attr_classifier = L.fully_connected(fc, cfg.attr_num, activation_fn=tf.nn.sigmoid)
            logger.debug("classifier shape = %r" % self.attr_classifier.shape.as_list())  
    
    def build_loss(self):
        train_variables = tf.trainable_variables()
        discr_vars = [v for v in train_variables if v.name.startswith('discriminator')]
        enc_dec_vars = [v for v in train_variables if v.name.startswith('encoder') or v.name.startswith('decoder')]
        class_vars = [v for v in train_variables if v.name.startswith('classifier')]
        logger.debug("variables for discriminator: %r" % discr_vars)
        logger.debug("variables for encoder/decoder: %r" % enc_dec_vars)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.variable_scope("loss_dis"):
            self.disc_error_elem = tf.abs(self.attr_discr - self.labels)
            self.disc_error_mae = tf.reduce_mean(self.disc_error_elem)
            self.loss_diss = tf.reduce_mean(
                               - tf.multiply(tf.log(self.attr_discr + epsilon), self.labels) - \
                                 tf.multiply(tf.log(1. - self.attr_discr + epsilon), 1. - self.labels)
                            )
            with tf.control_dependencies(update_ops):
                grad_vars = self.optimizer.compute_gradients(self.loss_diss, discr_vars)
                self.train_op.append(self.optimizer.apply_gradients(grad_vars))
                
        with tf.variable_scope("loss_dec_enc"):
            loss_diss_neg = tf.reduce_mean(
                                - tf.multiply(tf.log(self.attr_discr + epsilon), 1. - self.labels) - \
                                tf.multiply(tf.log(1. - self.attr_discr + epsilon), self.labels)
                            )
            self.reconstruction_err = tf.reduce_mean(tf.square(self.decoder - self.X))
            self.loss_dec_enc = self.reconstruction_err + self.lambda_enc * loss_diss_neg
            with tf.control_dependencies(update_ops):
                grad_vars = self.optimizer.compute_gradients(self.loss_dec_enc, enc_dec_vars)
                self.train_op.append(self.optimizer.apply_gradients(grad_vars, self.global_step))
            
        with tf.variable_scope("loss_classifier"):
            self.classfier_mae = tf.reduce_mean(tf.abs(self.attr_classifier - self.labels))
            self.loss_classifier = tf.reduce_mean(
                               - tf.multiply(tf.log(self.attr_classifier + epsilon), self.labels) - \
                                 tf.multiply(tf.log(1. - self.attr_classifier + epsilon), 1. - self.labels)
                                 )
            with tf.control_dependencies(update_ops):
                grad_vars = self.optimizer.compute_gradients(self.loss_classifier, class_vars)
                self.train_op.append(self.optimizer.apply_gradients(grad_vars))
                
                
    def _train_summary(self):
        train_summary = []
        train_summary.append(tf.summary.scalar('train/loss_diss', self.loss_diss))
        train_summary.append(tf.summary.scalar('train/loss_dec_enc', self.loss_dec_enc))
        train_summary.append(tf.summary.scalar('train/discr_error', self.disc_error_mae))
        train_summary.append(tf.summary.scalar('train/classifier_mae', self.classfier_mae))
        train_summary.append(tf.summary.scalar('train/reconstruction_loss', self.reconstruction_err))
        train_summary.append(tf.summary.scalar("train_debug/lambda_e", self.lambda_enc))
        train_summary.append(tf.summary.image('train_debug/origin_img', self.X))
        train_summary.append(tf.summary.image('train_debug/recons_img', self.decoder))
        train_summary.append(tf.summary.histogram("train_debug/encoder", self.encoder))
        train_summary.append(tf.summary.histogram("train_debug/discr_error", self.disc_error_elem))        
        self.train_summary = tf.summary.merge(train_summary)
    def valid_model(self, sess, sample, step):
        '''
        valid model, compute all batches in sample, and reconstruct 2 batches of images, save them to `imgdir`
        :param sess,  current tf session
        :param sample, type `utils.CelebSample`, valid celeb sample
        :param step,   type `int`, current step num
        '''
        sample_cnt = len(sample)
        error_recons = 0 
        error_swap = 0. 
        attr_err_mae = 0.
        rec_img_batces = []
        orig_img_batches = []
        for i in range(2):
            x, y0 = sample[i]
            yy0 = expand_attr2mat(y0)
            # swith with prob. 0.3
            switch_y = np.random.binomial(1, 0.5, y0.shape)
            y = y0 + switch_y
            y[y>1] = 0.
            yy = expand_attr2mat(y)
            err1 = sess.run(self.reconstruction_err, 
                                       {self.X: x, self.labels: y0, 
                                        self.mat_labels: yy0,
                                        self.is_training: False})
            rec_img = sess.run(self.decoder, 
                                       {self.X: x, self.labels: y, 
                                        self.mat_labels: yy,
                                        self.is_training: False})
            err2 = sess.run(self.classfier_mae, 
                                       {self.X: rec_img, self.labels: y, 
                                        self.mat_labels: yy,
                                        self.is_training: False})
            error_recons += err1
            error_swap += err2
            rec_img_batces.append(rec_img)
            orig_img_batches.append(x)
        # get img grid width & height 
        rec_imgs = np.concatenate(rec_img_batces)
        orig_imgs = np.concatenate(orig_img_batches)
        n = 2*cfg.batch_size
        w = int(np.sqrt(n))
        save_grid_imgs(rec_imgs, w, step, "reconstructed")
        save_grid_imgs(orig_imgs, w, step, "origin") 
        logger.debug("finish saving reconstructed images ...")
        for i in range(2, sample_cnt):
            x, y0 = sample[i]
            yy0 = expand_attr2mat(y0)
            # swith with prob. 0.3
            switch_y = np.random.binomial(1, 0.5, y0.shape)
            y = y0 + switch_y
            y[y>1] = 0.
            yy = expand_attr2mat(y)
            err1 = sess.run(self.reconstruction_err, 
                                       {self.X: x, self.labels: y0, 
                                        self.mat_labels: yy0,
                                        self.is_training: False})
            rec_img = sess.run(self.decoder, 
                                       {self.X: x, self.labels: y, 
                                        self.mat_labels: yy,
                                        self.is_training: False})
            err2 = sess.run(self.classfier_mae, 
                                       {self.X: rec_img, self.labels: y, 
                                        self.mat_labels: yy,
                                        self.is_training: False})
            error_recons += err1
            error_swap += err2
        error_recons /= sample_cnt
        error_swap /= sample_cnt
        if step == 0:
            with open(cfg.logdir + "/valid_loss.txt", "wt") as f:
                f.write("step,recon_err,swap_attr_cls_err\n%d,%.4e,%.4e\n" % (
                    step, error_recons, error_swap))
                f.flush()
        else:
            with open(cfg.logdir + "/valid_loss.txt", "at") as f:
                f.write("%d,%.4e,%.4e\n" % (step, error_recons, error_swap))
                f.flush()
        logger.debug("finish dump valid losses to txt file ...")
            
