import os
import sys
import json
import mxnet as mx
import numpy as np
from easydict import EasyDict as edict
os.environ['GLOG_minloglevel'] = '1'
sys.path.insert(0, '/home/yf/test/caffe/python')
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
caffe.set_mode_cpu()

__all__ = ['convert_net', 'convert_model', 'convert_mv', 'convert_bgr',
'merge_bn_proto', 'merge_bn_model']

def add_kernel_stride_pad(kwargs, attrs):
    """Translate MXNet 'kernel'/'stride'/'pad' attrs into Caffe conv/pool kwargs.

    Each attr is a string literal such as '(3, 3)'.  Square values collapse
    to the scalar Caffe form ('kernel_size', 'stride', 'pad'); rectangular
    ones are split into '<key>_h' / '<key>_w'.  Mutates `kwargs` in place.

    Raises KeyError if one of the three attrs is missing.
    """
    import ast  # local import: safe literal parsing without touching module imports

    for key in ['kernel', 'stride', 'pad']:
        # literal_eval is the safe replacement for eval() on strings read
        # from an external symbol file.
        attr = ast.literal_eval(attrs[key])
        if attr[0] == attr[1]:
            name = key + '_size' if key == 'kernel' else key
            kwargs[name] = attr[0]
        else:
            kwargs[key + '_h'] = attr[0]
            kwargs[key + '_w'] = attr[1]

def convert_net(symbol, prototxt, size):
    """Translate an MXNet symbol JSON file into a Caffe prototxt.

    Parameters
    ----------
    symbol : str
        Path to the MXNet ``*-symbol.json`` file.
    prototxt : str
        Output path for the generated Caffe network definition.
    size : int
        Input edge length; the data layer is shaped (1, 3, size, size).

    Raises
    ------
    ValueError
        For operators / attribute values that have no Caffe equivalent.
    """
    with open(symbol, 'r') as sym:
        nodes = json.load(sym)['nodes']
    layers = []   # one entry per mxnet node id: a caffe layer, or None for params
    bottoms = []  # node ids that are consumed as an input somewhere
    for node in nodes:
        node = edict(node)
        attrs = edict(getattr(node, 'attrs', {}))
        # Gluon suffixes forward nodes with '_fwd'; strip it for layer names.
        name = node.name.replace('_fwd', '')
        if node.op == 'null':
            # 'null' nodes are inputs; only 'data' maps to a real layer,
            # weight/bias nodes get a None placeholder to keep ids aligned.
            if node['name'] == 'data':
                layers.append(caffe.layers.Input(name='data', shape={'dim': [1, 3, size, size]}))
            else:
                layers.append(None)
                continue
        elif node.op == 'Convolution':
            kwargs = {}
            kwargs['name'] = name
            kwargs['num_output'] = eval(attrs.num_filter)
            kwargs['group'] = eval(getattr(attrs, 'num_group', '1'))
            add_kernel_stride_pad(kwargs, attrs)
            bottom = node['inputs'][0][0]
            if eval(getattr(attrs, 'no_bias', 'False')):
                kwargs['bias_term'] = False
            # NOTE: depthwise convs (num_output == group) could map to a
            # custom 'ConvolutionDepthwise' layer; plain Convolution with
            # `group` is used for both cases here.
            layers.append(caffe.layers.Convolution(layers[bottom], **kwargs))
            bottoms.append(bottom)
        elif node.op == 'FullyConnected':
            bottom = node['inputs'][0][0]
            layers.append(caffe.layers.InnerProduct(layers[bottom], name=name,
                          num_output=eval(attrs.num_hidden)))
            bottoms.append(bottom)
        elif node.op == 'Pooling':
            kwargs = {}
            kwargs['name'] = name
            add_kernel_stride_pad(kwargs, attrs)
            if attrs.pool_type == 'max':
                kwargs['pool'] = caffe.params.Pooling.MAX
            elif attrs.pool_type == 'avg':
                # BUG FIX: average pooling previously mapped to Pooling.MAX.
                kwargs['pool'] = caffe.params.Pooling.AVE
            else:
                raise ValueError('unsupported pool type: {}'.format(attrs.pool_type))
            if attrs.pooling_convention == 'full':
                # mxnet 'full' == caffe's default CEIL rounding; nothing to set
                pass
            elif attrs.pooling_convention == 'valid':
                kwargs['round_mode'] = caffe.params.Pooling.FLOOR
            else:
                raise ValueError('unsupported pool convention: {}'.format(attrs.pooling_convention))
            if eval(getattr(attrs, 'global_pool', 'False')):
                kwargs['global_pooling'] = True
            bottom = node['inputs'][0][0]
            layers.append(caffe.layers.Pooling(layers[bottom], **kwargs))
            bottoms.append(bottom)
        elif node.op == 'Activation':
            bottom = node['inputs'][0][0]
            mdict = {'relu': 'ReLU', 'sigmoid': 'Sigmoid', 'tanh': 'TanH', 'softsign': None, 'softrelu': None}
            # .get() so unknown act types raise the intended ValueError
            # instead of a KeyError.
            if not mdict.get(attrs.act_type):
                raise ValueError('unsupported act type: {}'.format(attrs.act_type))
            layers.append(getattr(caffe.layers, mdict[attrs.act_type])(layers[bottom], name=name))
            bottoms.append(bottom)
        elif node.op == 'LeakyReLU':
            act_type = attrs.act_type
            bottom = node['inputs'][0][0]
            if act_type == 'elu':
                layers.append(caffe.layers.ELU(layers[bottom], name=name,
                              alpha=eval(getattr(attrs, 'slope', '0.25'))))
            elif act_type == 'prelu':
                layers.append(caffe.layers.PReLU(layers[bottom], name=name))
            else:
                raise ValueError('unsupported act type: {}'.format(attrs.act_type))
            bottoms.append(bottom)
        elif node.op == 'BatchNorm':
            # One mxnet BatchNorm becomes a caffe BatchNorm + Scale pair.
            bottom = node['inputs'][0][0]
            bn = caffe.layers.BatchNorm(layers[bottom], name=name + '/bn',
                    moving_average_fraction=eval(getattr(attrs, 'momentum', '0.9')),
                    eps=eval(getattr(attrs, 'eps', '0.001')))
            layers.append(caffe.layers.Scale(bn, name=name + '/scale', bias_term=True))
            bottoms.append(bottom)
        elif node.op == 'elemwise_add':
            # BUG FIX: iterate all input triples ([node_id, out_idx, ...]),
            # not just the first one — same shape as the Concat branch below.
            bottom = [b[0] for b in node['inputs']]
            layers.append(caffe.layers.Eltwise(*[layers[b] for b in bottom], name=name,
                          operation='SUM'))
            bottoms.extend(bottom)
        elif node.op == 'Concat':
            bottom = [b[0] for b in node['inputs']]
            layers.append(caffe.layers.Concat(*[layers[b] for b in bottom], name=name))
            bottoms.extend(bottom)
        elif node.op == 'softmax':
            bottom = node['inputs'][0][0]
            layers.append(caffe.layers.Softmax(layers[bottom], name=name,
                          axis=eval(getattr(attrs, 'axis', '-1'))))
            bottoms.append(bottom)
        elif node.op == '_mul_scalar':
            # scalar multiply maps onto caffe's Power layer (scale only)
            bottom = node['inputs'][0][0]
            layers.append(caffe.layers.Power(layers[bottom], name=name, scale=eval(getattr(attrs, 'scalar', '-1'))))
            bottoms.append(bottom)
        elif node.op == 'relu':
            bottom = node['inputs'][0][0]
            layers.append(caffe.layers.ReLU(layers[bottom], name=name))
            bottoms.append(bottom)
        elif node.op == 'Reshape':
            bottom = node['inputs'][0][0]
            # A Gluon channel-shuffle is emitted as reshape/swapaxis/reshape;
            # the first reshape carries the group count in shape[2].
            if node.name.startswith('channelshuffle') and node.name.endswith('_reshape0'):
                layers.append(caffe.layers.ShuffleChannel(layers[bottom], name=node.name.replace('_reshape0', ''),
                              group=eval(attrs.shape)[2]))
            else:
                # other reshapes are no-ops for the converted topology
                layers.append(layers[bottom])
            bottoms.append(bottom)
        elif node.op in ['Dropout', 'Flatten', 'SwapAxis']:
            # inference no-ops: forward the bottom layer unchanged
            bottom = node['inputs'][0][0]
            layers.append(layers[bottom])
            bottoms.append(bottom)
        else:
            raise ValueError('unsupported operator: {}'.format(node.op))
    # Any layer never consumed as a bottom is a network output.
    consumed = set(bottoms)
    outputs = [layer for i, layer in enumerate(layers) if layer and i not in consumed]
    with open(prototxt, 'w') as f:
        print(caffe.to_proto(*outputs), file=f)

def convert_model(mx_net, prototxt, model):
    """Copy trained MXNet parameters into a Caffe net and save it to `model`.

    Parameter names are matched by their MXNet suffix: `_weight`/`_bias`/
    `_alpha` go to the layer of the same base name, BN statistics go to the
    '<name>/bn' layer (with its scale factor pinned to 1), and gamma/beta go
    to the '<name>/scale' layer.  Raises KeyError on an unrecognized suffix.
    """
    cf_net = caffe.Net(prototxt, phase=caffe.TEST)
    cf_params = cf_net.params

    for key, param in mx_net.collect_params().items():
        values = param.data().asnumpy()
        if key.endswith('_weight'):           # conv / fc weight
            cf_params[key[:-7]][0].data.flat = values.flat
        elif key.endswith('_bias'):           # conv / fc bias
            cf_params[key[:-5]][1].data.flat = values.flat
        elif key.endswith('_alpha'):          # prelu slope
            cf_params[key[:-6]][0].data.flat = values.flat
        elif key.endswith('_running_mean'):   # bn mean
            dest = cf_params[key.replace('_running_mean', '/bn')]
            dest[0].data.flat = values.flat
        elif key.endswith('_running_var'):    # bn variance + scale factor
            dest = cf_params[key.replace('_running_var', '/bn')]
            dest[1].data.flat = values.flat
            dest[2].data[...] = 1
        elif key.endswith('_gamma'):          # scale multiplier
            cf_params[key.replace('_gamma', '/scale')][0].data.flat = values.flat
        elif key.endswith('_beta'):           # scale shift
            cf_params[key.replace('_beta', '/scale')][1].data.flat = values.flat
        else:
            raise KeyError('Unsupported param: {}'.format(key))
    cf_net.save(model)

def convert_mv(net, mean=127.5, norm=0.0078125, prefix='conv0'):
    """Fold input mean-subtraction and scaling into the first conv layer.

    Rewrites W -> norm * W and b -> b - sum(mean * norm * W) over the
    non-output axes, so the net accepts raw pixel input in place of
    normalized input.  Only layers whose name starts with `prefix` change.
    """
    for name, blobs in net.params.items():
        if not name.startswith(prefix):
            continue
        scaled = norm * blobs[0].data
        blobs[0].data[...] = scaled
        # note: the bias correction uses the already-scaled weights
        blobs[1].data[...] = blobs[1].data - (mean * scaled).sum(axis=(1, 2, 3))

def convert_bgr(net, prefix='conv0'):
    """Swap the 3 input channels (RGB <-> BGR) of the first conv weights.

    Mutates in place every blob whose layer name starts with `prefix`;
    assumes those weights have exactly 3 input channels on axis 1.
    """
    for name, blobs in net.params.items():
        if name.startswith(prefix):
            reordered = blobs[0].data[:, (2, 1, 0), :, :]
            blobs[0].data[...] = reordered

def conv_bn_top(layers, name, top, bn_maps):
    """Follow a convolution's output blob through trailing BatchNorm/Scale.

    Scans `layers` in order; when a BatchNorm (then a Scale) consumes the
    current `top`, its layer name is recorded under bn_maps[name] and `top`
    advances to that layer's output.  Returns the final top blob name.
    """
    bn_maps[name] = {}
    for layer in layers:
        if not layer.bottom:
            continue  # e.g. Input layers have no bottoms
        if layer.type == "BatchNorm" and layer.bottom[0] == top:
            bn_maps[name]["bn"] = layer.name
            top = layer.top[0]
        if layer.type == "Scale" and layer.bottom[0] == top:
            bn_maps[name]["scale"] = layer.name
            top = layer.top[0]
    return top

def merge_bn_proto(prototxt):
    """Rewrite `prototxt` in place with BatchNorm/Scale layers folded away.

    Every BatchNorm/Scale layer is dropped; each convolution's top blob is
    renamed to the blob produced by its (removed) BN/Scale tail so the rest
    of the net stays connected.  Returns ``bn_maps``, a dict
    {conv_name: {'bn': ..., 'scale': ..., 'type': ...}} consumed by
    merge_bn_model to fold the statistics into the weights.
    """
    net_specs = caffe_pb2.NetParameter()
    with open(prototxt, 'r') as f:
        text_format.Merge(f.read(), net_specs)
    # Copy of the net with the same header fields but no layers.
    net_specs2 = caffe_pb2.NetParameter()
    net_specs2.MergeFrom(net_specs)
    del net_specs2.layer[:]
    bn_maps = {}
    for l in net_specs.layer:
        if l.type == 'BatchNorm' or l.type == 'Scale':
            continue  # dropped; their effect is merged into the conv weights
        elif 'Convolution' in l.type:
            top = conv_bn_top(net_specs.layer, l.name, l.top[0], bn_maps)
            bn_maps[l.name]['type'] = l.type
            layer = net_specs2.layer.add()
            layer.MergeFrom(l)
            layer.top[0] = top
            if 'bn' in bn_maps[l.name]:
                # The folded bias needs a bias term: clear any explicit
                # `bias_term: false` so caffe allocates the bias blob.
                try:
                    layer.convolution_param.ClearField('bias_term')
                except ValueError:
                    # ClearField raises ValueError for unknown field names
                    pass
        else:
            layer = net_specs2.layer.add()
            layer.MergeFrom(l)
    with open(prototxt, 'w') as fp:
        fp.write(str(net_specs2))
    return bn_maps

def merge_bn_model(net, prototxt, bn_maps, eps=1e-5):
    """Fold BatchNorm/Scale statistics into the weights of a BN-free net.

    Parameters
    ----------
    net : caffe.Net
        Original net (still containing BatchNorm/Scale) holding the weights.
    prototxt : str
        BN-free prototxt produced by merge_bn_proto.
    bn_maps : dict
        Mapping returned by merge_bn_proto.
    eps : float
        Numerical-stability epsilon added to the variance.

    Returns the new caffe.Net with the merged parameters.
    """
    nobn = caffe.Net(prototxt, phase=caffe.TEST)
    for key, params in nobn.params.items():
        if key not in bn_maps or 'bn' not in bn_maps[key]:
            # Layer without a trailing BN: copy blobs verbatim.
            for i, param in enumerate(params):
                param.data[...] = net.params[key][i].data
            continue
        weight = net.params[key][0].data
        layer_type = bn_maps[key]['type']
        if layer_type in ['Convolution', 'ConvolutionDepthwise']:
            channels = weight.shape[0]
        elif layer_type == 'Deconvolution':
            # Deconv weights are (in, out, kh, kw): outputs live on axis 1.
            # BUG FIX: previously referenced the undefined name `wt`.
            channels = weight.shape[1]
        else:
            raise KeyError('Type error: {}'.format(layer_type))
        if len(net.params[key]) > 1:
            bias = net.params[key][1].data
        else:
            bias = np.zeros(channels)
        bn = net.params[bn_maps[key]['bn']]
        mean = bn[0].data
        var = bn[1].data
        scalef = bn[2].data
        scale = net.params[bn_maps[key]['scale']]
        scales = scale[0].data
        shift = scale[1].data
        # caffe stores running stats multiplied by a moving-average factor;
        # divide it out before use.
        if scalef != 0:
            scalef = 1 / scalef
        mean = mean * scalef
        var = var * scalef
        rstd = 1. / np.sqrt(var + eps)
        # Broadcast rstd/scales along the output-channel axis of the weights.
        if layer_type in ['Convolution', 'ConvolutionDepthwise']:
            shape = (channels, 1, 1, 1)
        else:
            shape = (1, channels, 1, 1)
        weight = weight * rstd.reshape(shape) * scales.reshape(shape)
        bias = (bias - mean) * rstd * scales + shift
        params[0].data[...] = weight
        params[1].data[...] = bias
    return nobn

if __name__ == '__main__':
    # Print the parameters of each convolution in the format expected by
    # the fomoro receptive-field calculator ("kernel,stride,1,SAME;").
    prototxt_path = sys.argv[1]
    proto = caffe_pb2.NetParameter()
    sys.stdout.write('https://fomoro.com/projects/project/receptive-field-calculator#')
    with open(prototxt_path, 'r') as f:
        text_format.Merge(f.read(), proto)
    for layer in proto.layer:
        # Skip non-conv layers and those whose names end in 'c'/'d'.
        if layer.type not in ['Convolution'] or layer.name[-1] in ['c', 'd']:
            continue
        conv = layer.convolution_param
        kernel = conv.kernel_size[0] if conv.kernel_size else 1
        stride = conv.stride[0] if conv.stride else 1
        sys.stdout.write('{},{},1,SAME;'.format(kernel, stride))
    print('')