"""
License: Apache-2.0
Author: wentongxin
E-mail: flybywind@foxmail.com
"""

import tensorflow as tf
import logging
from config import cfg
from tensorflow.contrib.keras import layers as kly
from CapsuleLayer import CapsuleConv2dLayer, CapsuleRoutin1DLayer
import app_logger 
epsilon = 1e-9

logger = logging.getLogger("CapsNet")
app_logger.init(cfg.log_lvl)

class CapsNet(object):
    """Capsule network (CapsNet) graph for MNIST, after Sabour et al., 2017.

    Builds a TensorFlow 1.x static graph in `self.graph`:
    Conv1 -> PrimaryCaps -> DigitCaps -> reconstruction decoder, plus
    margin/reconstruction losses, an Adam train op with exponential
    learning-rate decay, and TensorBoard summaries.  All hyper-parameters
    come from the project-level `cfg` object.
    """
    def __init__(self):
        '''
        For MNIST dataset: constructs the entire computation graph
        (input placeholders, architecture, losses, optimizer, summaries)
        inside `self.graph` so it can be launched in a dedicated session.
        '''
        self.graph = tf.Graph()
        # Capsule dimensionalities: 8-D primary capsules, 16-D digit capsules.
        self.primary_cap_num = 8
        self.digital_cap_num = 16
        # MNIST has 10 digit classes.
        self.class_num = 10
        with self.graph.as_default():
            # Non-trainable step counter; incremented by the train op below.
            global_step = tf.Variable(0, name = "global_step", trainable=False)
            # Smooth (staircase=False) exponential learning-rate decay
            # driven by `global_step`.
            self.learning_rate = tf.train.exponential_decay(cfg.init_lr, global_step,
                                               cfg.decay_step, cfg.decay_lr, staircase=False)
            with tf.variable_scope("CapsNetInput") as scope:
                # Grayscale 28x28 images and their integer class labels.
                self.X = tf.placeholder(tf.float32, (None, 28, 28, 1), name = "ImgX") 
                self.labels = tf.placeholder(tf.int32, (None, ), name = "ImgLabel")
                
            # One-hot labels, [batch_size, class_num]; used by both the margin
            # loss (as T_c) and the decoder's capsule mask.
            self.Y = tf.one_hot(self.labels, depth=self.class_num, axis=1, dtype=tf.float32)
            self.build_arch()
            self.loss()
            self._summary()

            # `global_step` will be automatically increased by one every time
            # the train op runs; Adam minimizes the combined total loss.
            self.train_op = (
                tf.train.AdamOptimizer(self.learning_rate)
                    .minimize(self.total_loss, global_step=global_step)
                )

        logger.info('Seting up the main structure')

    def build_arch(self):
        """Build the forward pass: Conv1 -> PrimaryCaps -> DigitCaps -> decoder.

        Defines (as graph tensors): `self.caps1`, `self.caps2`,
        `self.v_length` (per-class capsule lengths), `self.argmax_idx`
        (predicted class), `self.decoded` (flattened reconstruction) and
        `self.batch_accuracy` (SUM of correct predictions, not the mean —
        it is divided by `cfg.batch_size` in `_summary`).
        """
        with tf.variable_scope('Conv1_layer'):
            # Conv1, [batch_size, 20, 20, 256]
            # (28 - 9 + 1 = 20 with VALID padding and stride 1).
            conv1 = kly.Conv2D(256, kernel_size=9, strides=1,
                               activation="relu",
                               padding='VALID')(self.X)
            logger.debug("shape of conv1 = %r" % conv1.shape)
        # Primary Capsules layer, return [batch_size, 1152, 8]
        with tf.variable_scope('PrimaryCaps_layer'):
            self.primary_capsule = CapsuleConv2dLayer(
                cap_len_out = self.primary_cap_num,
                channel_num = 32, kernel_size = 9, 
                strides = 2)
#                 activation="relu",
            self.caps1 = self.primary_capsule(conv1)
            logger.debug("shape of primary_layer = %r" % self.caps1.shape)

        # DigitCaps layer, return [batch_size, 10, 16, 1]
        with tf.variable_scope('DigitCaps_layer'):
            self.digital_capsule = CapsuleRoutin1DLayer(
                output_dim = self.class_num, 
                 cap_len_in = self.primary_cap_num, 
                 cap_len_out = self.digital_cap_num, 
                 rout_iter = cfg.iter_routing,
                 initializer = tf.random_normal_initializer(0, stddev = cfg.stddev))
            # NOTE(review): the second positional argument (32) presumably
            # mirrors `channel_num` of the primary capsule layer — confirm
            # against CapsuleRoutin1DLayer's __call__ signature.
            self.caps2 = self.digital_capsule(self.caps1, 32)
            logger.debug("shape of digital_layer = %r" % self.caps2.shape)
            # Capsule length ||v_c|| per class; epsilon keeps the sqrt
            # gradient finite when a capsule collapses to zero.
            self.v_length = tf.sqrt(tf.reduce_sum(tf.square(self.caps2),
                                        axis=2) + epsilon, name = "v_length")
            # Predicted class = index of the longest digit capsule.
            self.argmax_idx = tf.to_int32(tf.argmax(self.v_length, axis = 1))
            
        with tf.variable_scope('Decoder'):
            # Mask out every capsule except the TRUE class's (label-driven
            # masking, as in the paper's training-time reconstruction), then
            # reconstruct the 28*28 = 784-pixel image with a 3-layer MLP.
            mask_out = tf.multiply(self.caps2, 
                        tf.tile(tf.expand_dims(self.Y, -1), [1, 1, self.digital_cap_num]))
            left_target = tf.reduce_sum(mask_out, axis=1)
            fc1 = kly.Dense(512, activation="relu")(left_target)
            fc2 = kly.Dense(1024, activation="relu")(fc1)
            self.decoded = kly.Dense(784, activation=tf.sigmoid)(fc2)
        
        with tf.variable_scope('Accuracy'):
            # Sum of correct predictions over the batch (see docstring).
            correct_prediction = tf.to_float(tf.equal(self.labels, self.argmax_idx))
            self.batch_accuracy = tf.reduce_sum(correct_prediction)

    def loss(self):
        """Define `self.margin_loss`, `self.reconstruction_err` and their sum
        `self.total_loss` (the quantity minimized by the train op)."""
        # 1. The margin loss
        # max_l = max(0, m_plus-||v_c||)^2
        max_l = tf.square(tf.maximum(0., cfg.m_plus - self.v_length))
        # max_r = max(0, ||v_c||-m_minus)^2
        max_r = tf.square(tf.maximum(0., self.v_length - cfg.m_minus))
        # NOTE(review): graph-build-time shape sanity check only; it is
        # stripped when running under `python -O`.
        assert max_l.get_shape() == [cfg.batch_size, self.class_num]


        # calc T_c: [batch_size, 10]
        T_c = self.Y
        # [batch_size, 10], element-wise multiply: present classes are
        # pushed above m_plus, absent ones (down-weighted by lambda_val)
        # below m_minus.
        L_c = T_c * max_l + cfg.lambda_val * (1 - T_c) * max_r

        self.margin_loss = tf.reduce_sum(L_c)
        
        # 2. The reconstruction loss
        # Sum of squared pixel errors against the flattened input image.
        orgin = tf.reshape(self.X, shape=(cfg.batch_size, -1))
        squared = tf.square(self.decoded - orgin)
        #squared = tf.maximum(0., 
                    #tf.square(self.decoded - orgin) - cfg.m_recons**2)
        # NOTE(review): the divisor 78. looks like it may be a typo for
        # 784 (= 28*28 pixels) — confirm the intended scaling.
        self.reconstruction_err = cfg.regularization_scale * tf.reduce_sum(squared)/78.

        # 3. Total loss
        self.total_loss = self.margin_loss + self.reconstruction_err

    # Summary
    def _summary(self):
        """Collect scalar/image/histogram summaries and merge them into
        `self.train_summary`.

        Also defines the `self.test_accurace` placeholder so an externally
        computed test accuracy can be fed in when writing summaries.
        """
        train_summary = []
        train_summary.append(tf.summary.scalar('train/margin_loss', self.margin_loss))
        train_summary.append(tf.summary.scalar('train/reconstruction_loss', self.reconstruction_err))
        train_summary.append(tf.summary.scalar('train/total_loss', self.total_loss))
        # NOTE(review): "accurace" in the tags and attribute names below is a
        # typo for "accuracy"; kept as-is since summary tags are runtime
        # strings that existing TensorBoard logs/consumers may depend on.
        train_summary.append(tf.summary.scalar('train/batch_accurace', self.batch_accuracy/cfg.batch_size))
        train_summary.append(tf.summary.scalar("train/learning_rate", self.learning_rate))
        # Original vs. reconstructed images for visual debugging.
        recon_img = tf.reshape(self.decoded, shape=(cfg.batch_size, 28, 28, 1))
        train_summary.append(tf.summary.image('train_debug/origin_img', self.X))
        train_summary.append(tf.summary.image('train_debug/recons_img', recon_img))
        # Histograms over digit-capsule predictions and routing internals
        # (`routin_b`, `W` are attributes exposed by CapsuleRoutin1DLayer).
        train_summary.append(tf.summary.histogram("train_debug/digital_capsule_length_idx", self.argmax_idx))
        train_summary.append(tf.summary.histogram("train_debug/digital_capsule_length_max", tf.reduce_max(self.v_length, -1)))
        train_summary.append(tf.summary.histogram("train_debug/digital_capsule_routin", self.digital_capsule.routin_b))
        train_summary.append(tf.summary.histogram("train_debug/digital_capsule_Weight", self.digital_capsule.W))
        
        # Fed at evaluation time; merged alongside training summaries.
        self.test_accurace = tf.placeholder(dtype = tf.float32, name = "test_accuracy")
        train_summary.append(tf.summary.scalar("test/test_accurace", self.test_accurace))
        self.train_summary = tf.summary.merge(train_summary)
