
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import *


'''
    @brief  Fuse the parameters of the batch-normalization layer that
            directly follows a conv layer into the conv layer's own
            weights and bias (in place, via set_weights).
    @para[in]   layer       conv layer whose output feeds a BN layer
'''
def fused_bn2conv(layer):
    # BUG FIX: the original guard required 'batch_normalization' to appear
    # in the conv layer's OWN name, which never happens, so fusion was
    # silently skipped. Instead, check the downstream layer.
    if 'conv' not in layer.name:
        return
    if not layer._outbound_nodes:
        return
    bn_layer = layer._outbound_nodes[0].outbound_layer
    if 'batch_normalization' not in bn_layer.name:
        return

    print("fusing batch normalization to {}".format(layer.name))
    conv_w = layer.get_weights()[0]
    conv_b = layer.get_weights()[1]
    # BUG FIX: the format string was missing one '{}', so the four stats
    # were printed shifted/truncated.
    print("original weights max:{} min:{} bias max:{} min:{}".format(
        conv_w.max(), conv_w.min(),
        conv_b.max(), conv_b.min()
    ))
    # Keras BatchNormalization stores its weights in the order
    # [gamma, beta, moving_mean, moving_variance] (scale & center enabled).
    # BUG FIX: gamma and beta were previously read in swapped order.
    bn_weights  = bn_layer.get_weights()
    bn_gamma    = bn_weights[0]
    bn_beta     = bn_weights[1]
    bn_mean     = bn_weights[2]
    bn_variance = bn_weights[3]
    epsilon     = 1e-3  # Keras BatchNormalization default epsilon

    # Per-channel multiplier folded into the kernel:
    #   w' = w * gamma / sqrt(var + eps)
    scale = bn_gamma / np.sqrt(bn_variance + epsilon)

    if 'conv2d' in layer.name:
        if 'depthwise' in layer.name:
            # Depthwise 2D kernel shape is (h, w, in_channels, multiplier);
            # BN channel index is k * multiplier + l.
            ch = conv_w.shape[2] * conv_w.shape[3]
            conv_w *= scale[:ch].reshape(conv_w.shape[2], conv_w.shape[3])
            depth_dim = ch
        else:
            # Regular 2D conv: scale broadcasts over the last (out-ch) axis.
            conv_w *= scale[:conv_w.shape[3]]
            depth_dim = conv_w.shape[3]
    else:
        if 'depthwise' in layer.name:
            # 1D depthwise: original code scaled along axis 1 only.
            conv_w *= scale[:conv_w.shape[1]].reshape(1, -1, 1)
            depth_dim = conv_w.shape[1] * conv_w.shape[2]
        else:
            conv_w *= scale[:conv_w.shape[2]]
            depth_dim = conv_w.shape[2]

    # Folded bias: b' = gamma * (b - mean) / sqrt(var + eps) + beta
    d = depth_dim
    conv_b[:d] = scale[:d] * (conv_b[:d] - bn_mean[:d]) + bn_beta[:d]

    print('fused weight max', conv_w.max(), 'min', conv_w.min())
    print('fused bias max', conv_b.max(), 'min', conv_b.min())
    layer.set_weights([conv_w, conv_b])

def is_lstm_layer(layer):
    """Return True if *layer* is an LSTM layer, or a generic RNN wrapper
    whose cell is an LSTMCell."""
    # BUG FIX: use isinstance() instead of exact type comparison so
    # subclasses of LSTM/RNN are recognized as well.
    if isinstance(layer, LSTM) or 'lstm' in layer.name:
        return True
    if isinstance(layer, RNN) or 'rnn' in layer.name:
        # BUG FIX: a layer merely *named* '...rnn...' may not carry a
        # .cell attribute; guard the access instead of raising.
        cell = getattr(layer, 'cell', None)
        if cell is not None and (isinstance(cell, LSTMCell) or 'lstm' in cell.name):
            return True
    return False

def is_rnn_layer(layer):
    """Return True for any recurrent layer: a layer named like an RNN
    wrapper, an LSTM, or a GRU."""
    return ('rnn' in layer.name
            or is_lstm_layer(layer)
            or is_gru_layer(layer))

def is_gru_layer(layer):
    """Return True if *layer* is a GRU layer, or a generic RNN wrapper
    whose cell is a GRUCell."""
    # BUG FIX: use isinstance() instead of exact type comparison so
    # subclasses of GRU/RNN are recognized as well.
    if isinstance(layer, GRU) or 'gru' in layer.name:
        return True
    if isinstance(layer, RNN) or 'rnn' in layer.name:
        # BUG FIX: a layer merely *named* '...rnn...' may not carry a
        # .cell attribute; guard the access instead of raising.
        cell = getattr(layer, 'cell', None)
        if cell is not None and (isinstance(cell, GRUCell) or 'gru' in cell.name):
            return True
    return False

def is_shift_layer(layer):
    """Return True when *layer*'s output requires an output shift in
    quantized inference (conv/dense/merge/activation layers and RNNs)."""
    name = layer.name
    # Layer kinds recognized purely from the layer's name.
    keyword_hits = ('input', 'conv2d', 'conv1d', 'dense',
                    'softmax', 'sigmoid', 'tanh', 'subtract', 'multiply')
    if any(kw in name for kw in keyword_hits):
        return True
    # 'add' layers count, but 'zero'-padding style names do not.
    if 'add' in name and 'zero' not in name:
        return True
    # Generic Activation layers qualify only for these activations.
    if 'activation' in name:
        act = layer.get_config()['activation']
        if act in ('softmax', 'hard_sigmoid', 'tanh', 'hard_tanh'):
            return True
    return is_rnn_layer(layer)

def tensor_name(t):
    """Build a C-identifier-safe name for tensor *t* by replacing the
    '/' and ':' of its graph name with underscores."""
    return 'tensor_' + t.name.translate(str.maketrans('/:', '__'))

def dec_bits_max_min(data, width=8):
    """Choose the number of fractional ("decimal") bits for quantizing
    *data* into a signed fixed-point number of *width* bits.

    Args:
        data: numpy array of weights/activations (needs .max()/.min()).
        width: total bit width of the quantized representation.

    Returns:
        int: fractional bits = (width - 1) - integer bits needed for the
        data's extreme values.
    """
    # BUG FIX: the original shadowed the builtins max/min with floats and
    # then called max(max, min), which raised TypeError unconditionally.
    # Shrink each extreme by one LSB-worth so a value exactly at a power
    # of two does not demand an extra integer bit.
    hi = abs(data.max()) - abs(data.max() / pow(2, width))
    lo = abs(data.min()) - abs(data.min() / pow(2, width))
    largest = max(hi, lo)
    if largest <= 0:
        # All-zero (or degenerate) data: every bit can be fractional.
        return width - 1
    int_bits = int(np.ceil(np.log2(largest)))
    return (width - 1) - int_bits

def quantize_weights(model, name='weights.h', layer_q_list=None):
    """Quantize the weights of every weighted layer in *model* and write
    a C header (Q-format weights and bias) to the file *name*.

    Args:
        model: Keras model whose layers are inspected.
        name: output header file path.
        layer_q_list: optional per-layer quantization overrides
            (currently unused in the visible implementation).

    Raises:
        Exception: if a batch-normalization layer is not placed directly
            after a conv layer (the only fusion pattern supported).
    """
    # BUG FIX: avoid the shared-mutable-default-argument pitfall.
    layer_q_list = [] if layer_q_list is None else layer_q_list

    for layer in model.layers:

        # Layers without weight tensors have nothing to quantize.
        if not layer.weights:
            continue

        # BN folding is only supported when the BN directly follows a conv.
        # NOTE(review): inbound_layers can be a list in some Keras
        # versions — confirm against the targeted TF release.
        if ('batch_normalization' in layer.name) and \
           ('conv' not in layer.inbound_nodes[0].inbound_layers.name):
           raise Exception('Only support BN placed after conv')
        # BUG FIX: guard outbound_nodes — a conv that is the model's last
        # layer has none, and indexing [0] would raise IndexError.
        if ('conv' in layer.name) and layer.outbound_nodes and \
           ('batch_normalization' in layer.outbound_nodes[0].outbound_layer.name):
           fused_bn2conv(layer)

        print('quantizing weights for layer {}'.format(layer.name))
        weights = layer.get_weights()
        for i, w in enumerate(weights):
            # BUG FIX: the original rebound 'name' here, clobbering the
            # output-file path parameter used by open() below.
            weight_name = tensor_name(layer.weights[i])
            dec_bits = dec_bits_max_min(w, width=8)
            # BUG FIX: the format string had two placeholders but was
            # given only one argument (IndexError at runtime).
            print('{} dec bit:{}'.format(weight_name, dec_bits))

            if is_shift_layer(layer) and not is_rnn_layer(layer):
                # BUG FIX: this branch was left empty (a SyntaxError).
                # TODO: per-layer quantization/export not implemented yet.
                pass

    with open(name, 'w') as fp:
        fp.write('#include "ednn.h"\n\n\n')
        fp.write('/* Q format weights and bias */\n')