# -*- encoding: utf8 -*- 
"""
License: Apache-2.0
Author:  Wentongxin
E-mail: flybywind@foxmail.com
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import layers as kly
from tensorflow.python.ops.init_ops import glorot_normal_initializer
from config import cfg
import logging
import app_logger 

# Small constant added under the sqrt in squash() so zero-length capsule
# vectors do not produce a division by zero / NaN gradient.
epsilon = 1e-9

# Configure the shared application logger once at import time (level comes
# from the project config), then grab a module-specific logger.
app_logger.init(cfg.log_lvl)
logger = logging.getLogger("CapsuleLayer")

def squash(vector):
    '''Apply the capsule squashing non-linearity (Eq. 1 of the CapsNet paper).

    Short vectors are shrunk toward zero length and long vectors toward
    unit length; the direction of each capsule vector is preserved.

    Args:
        vector: A tensor with shape [?, ..., vec_len]; squashing acts on
            the last ('vec_len') dimension.
    Returns:
        A tensor with the same shape as `vector`, rescaled per capsule.
    '''
    squared_norm = tf.reduce_sum(tf.square(vector), -1, keep_dims=True)
    # epsilon keeps the sqrt (and its gradient) finite for zero vectors
    scale = squared_norm / (1 + squared_norm) / tf.sqrt(squared_norm + epsilon)
    squashed = tf.multiply(scale, vector, name = "Squash_v")  # element-wise
    return squashed

class CapsuleConv2dLayer():
    # Class-level counter used only to give every instance a unique scope name.
    __id = 0
    def __init__(self, 
               cap_len_out,
               channel_num,
               **conv2d_params):
        ''' CapsuleConv2dLayer, it is simply a conv2d layer, except that it's number of output channel will be cap_len_out * channel_num
        and its output will be reshaped to batch_size x cap_num x cap_len_out, where cap_num = width x height x channel_num
        Args:
            cap_len_out:  int, output capsule length
            channel_num:  int, number of output layers of capsule conv2d
            conv2d_params: forwarded to keras.layers.Conv2D (kernel_size,
                strides, padding, activation, ...); `filters` is derived
                here and must not be passed in.
        '''
        self.cap_len_out = cap_len_out
        self.channel_num = channel_num 
        self.conv2d_layer = kly.Conv2D(filters = cap_len_out * channel_num, **conv2d_params)
        
        CapsuleConv2dLayer.__id += 1
        # Capture the id NOW, like CapsuleRoutin1DLayer does.  The previous
        # code read the class counter inside __call__, so two instances
        # constructed before either was called would both build their graph
        # in the scope named after the latest id (scope collision).
        self.scope = "CapsuleConv2dLayer_%d" % CapsuleConv2dLayer.__id
        
    def __call__(self, inpt):
        '''
        Args:
            inpt:   input tensor, NHWC (batch, height, width, channels)
        
        Return:
            an tensor defining a graph of CapsuleConv2dLayer, shaped
            [batch_size, w*h*channel_num, cap_len_out] and squashed
        '''
        with tf.variable_scope(self.scope):
            inner_layer = self.conv2d_layer(inpt)
            # static output spatial dims must be known to compute cap_num
            _, w, h, _ = inner_layer.shape
            cap_layer = tf.reshape(inner_layer, [-1, (w*h*self.channel_num).value, self.cap_len_out])
        return squash(cap_layer) 
  

class CapsuleRoutin1DLayer():
    # Class-level counter; each instance captures it into self.scope so
    # variable scopes never collide between instances.
    __id = 0
    def __init__(self, output_dim, 
                 cap_len_in, cap_len_out, 
                 rout_iter = 3,
                 initializer = glorot_normal_initializer()):
        ''' Capsule Dynamically routing Layer, must follow a CapsuleConv2dLayer,
        just used as final classified layer

        Args:
            output_dim:  int, number of output capsules (e.g. number of classes)
            cap_len_in:  int, length of each input capsule vector
            cap_len_out: int, length of each output capsule vector
            rout_iter:   int >= 1, total routing iterations; the last one is
                         performed outside the loop with stop_gradient applied
                         to the coupling coefficients (see __iter_routing_op)
            initializer: initializer for the transform matrix W
        '''
        self.output_dim = output_dim
        self.cap_len_in = cap_len_in 
        self.cap_len_out = cap_len_out 
        # One iteration is reserved for the final stop_gradient pass in
        # __iter_routing_op, hence the minus one.
        self.rout_iter = rout_iter - 1
        assert(self.rout_iter >= 0)
        self.initializer = initializer
        CapsuleRoutin1DLayer.__id += 1
        # Scope name fixed at construction time -> unique per instance.
        self.scope = "CapsuleRoutin1DLayer_%d" % CapsuleRoutin1DLayer.__id 
    
    def __call__(self, inpt, channel_num):
        '''Build the routing sub-graph.

        Args:
            inpt:        tensor [batch_size, cap_num, cap_len_in], i.e. the
                         reshaped/squashed output of a CapsuleConv2dLayer
            channel_num: int, number of capsule channels inside `inpt`;
                         capsules of the same channel share one W slice

        Returns:
            the routed output capsules `v` (see __iter_routing_op); the final
            routing logits are kept on self.routin_b for inspection
        '''
        inpt_shape = inpt.get_shape()
        # static capsule count is required to build W and routin_b0
        self.cap_num = inpt_shape[1].value
        self.channel_num = channel_num
        # spatial positions per channel; W is shared across them
        self.channel_size = self.cap_num // self.channel_num
        self.inpt = inpt
        logger.debug("Input shape: %r" % inpt_shape)
        # [batch_size,  cap_num, cap_len_in]
        with tf.variable_scope(self.scope) as scope:
            # one (cap_len_in x cap_len_out) matrix per (channel, output
            # capsule) pair; leading 1 is the batch broadcast dimension
            self.W = tf.get_variable("Wcapsule", 
                                shape = (1, self.channel_num, self.output_dim, self.cap_len_in, self.cap_len_out), 
                                initializer = self.initializer)
            # init dynamically
            # each sample in batch should have different routin weight
            # (a tf.constant of zeros, NOT a variable: logits restart at
            # zero on every forward pass, per the routing algorithm)
            self.routin_b0 = tf.constant(np.zeros((cfg.batch_size, self.cap_num, self.output_dim, 1, 1)), name="routin_b", dtype = tf.float32)
            v, routin_b = self.__iter_routing_op()
            self.routin_b = routin_b
            
        return v

    def __iter_routing_op(self):
        '''Dynamic routing-by-agreement.

        Returns:
            (v, routin_b) where v is the squeezed output capsule tensor
            (batch_size, output_dim, cap_len_out after squeeze) and
            routin_b holds the final routing logits
            [batch_size, cap_num, output_dim, 1, 1].
        '''
        # [batch_size, cap_num, 1, 1, cap_len_in]
        inpt_exp = tf.expand_dims(tf.expand_dims(self.inpt, -2), -2)
        # replicate every input capsule once per output capsule
        inpt_ti = tf.tile(inpt_exp, [1, 1, self.output_dim, 1, 1])
        # TODO:
        # since the first dimension of W_ti need to known in advance, 
        # same for the following `tf.tile` with cfg.batch_size as its first param,
        # the test set need predicted in the same batch as training 
        # Can do it dynamically ??
        # Tile W over the batch and over the positions within a channel,
        # then fold (channel, position) back into the cap_num axis.
        # NOTE(review): the tile multiple `self.channel_size` sits on the
        # output_dim axis before the reshape — verify this yields the
        # intended per-channel weight-sharing layout.
        W_ti = tf.reshape(
                tf.tile(self.W, [cfg.batch_size, 1, self.channel_size, 1 , 1]),
                (cfg.batch_size, self.cap_num, self.output_dim, self.cap_len_in, self.cap_len_out),
                "W_share_channel")
        logger.debug("WeightMatrix tiled shape = %r" % W_ti.shape)
        logger.debug("input tiled shape = %r" % inpt_ti.shape)
        # prediction vectors u_hat = u . W
        # [batch_size, cap_num, output_dim, 1, cap_len_out]
        u_hat_exp = tf.matmul(inpt_ti, W_ti)
        logger.debug("U_hat_exp shape = %r" % (u_hat_exp.shape))
        
        with tf.variable_scope("routin_iter"):
            routin_b = self.routin_b0
            logger.debug("routin_b shape = %r" % (routin_b.shape))
            for i in range(self.rout_iter):
                # coupling coefficients: softmax of the logits over the
                # output-capsule axis, broadcast along cap_len_out
                # [batch_size, cap_num, output_dim, 1, cap_len_out]
                routing_c = tf.tile(tf.nn.softmax(routin_b, dim = 2), 
                                    [1, 1, 1, 1, self.cap_len_out], 
                                    name = "routin_c")
                logger.debug("routing_c shape = %r" % (routing_c.shape))
                # weighted sum over all input capsules
                # [batch_size, 1, output_dim, 1, cap_len_out]
                s = tf.reduce_sum(tf.multiply(routing_c, u_hat_exp, name = "s_j"), 
                                  axis = 1, keep_dims = True, name = "S")
                v = squash(s)
                logger.debug("internal v shape = %r" % v.shape)
                # broadcast v back to every input capsule for the agreement
                # [batch_size, cap_num, output_dim, 1, cap_len_out]
                v_1 = tf.tile(v, [1, self.cap_num, 1, 1, 1])
                # agreement u_hat . v raises the logit of matching routes
                routin_b += tf.matmul(u_hat_exp, v_1, 
                                      transpose_b=True, name = "u_dot_v")
                logger.debug("after update, routing_b shape = %r" % routin_b.shape)
            
            # final pass: coefficients are frozen (stop_gradient) so the
            # routing weights do not receive gradients through this branch
            routing_c = tf.stop_gradient(
                            tf.tile(tf.nn.softmax(routin_b, dim = 2), 
                                [1, 1, 1, 1, self.cap_len_out]),
                                name = "final_routin_c")
            s = tf.reduce_sum(tf.multiply(routing_c, u_hat_exp, name = "s_j"), 
                                         axis = 1, name = "S")
            # squeeze drops the singleton axes left by the matmul layout
            v = tf.squeeze(squash(s), name = 'final_v')
        return  v, routin_b
