import pickle
import numpy as np
import torch
from collections import OrderedDict
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, metavar='CHECKPOINT', help='path for caffe model checkpoint')
parser.add_argument('--output', type=str, metavar='OUTPUT', help='path for output pytorch model file')
parser.add_argument('--to', type=str, default='maskrcnn', choices=['backbone', 'maskrcnn'], help='backbone | maskrcnn')

args = parser.parse_args()
print(args)
# SECURITY: pickle.load executes arbitrary code from the file -- only run this
# on checkpoints from a trusted source.
# 'latin1' encoding is required to unpickle Python-2-era Detectron blobs.
with open(args.checkpoint, 'rb') as f:  # fixed: file handle was never closed
    state = pickle.load(f, encoding='latin1')
if 'blobs' in state:
    # Detectron checkpoints nest the weights under a 'blobs' key.
    state = state['blobs']
# Direct caffe-blob-name -> pytorch-state-dict-key translations for everything
# that is not a numbered residual block (those are converted by rule in
# key_caffe2torch). Was wrapped in a redundant dict(...) call around a literal.
key_mapping_dict = {
    'conv1_w': 'backbone.conv1.weight',
    'conv1_b': 'backbone.conv1.bias',
    'res_conv1_bn_s': 'backbone.bn1.weight',
    'res_conv1_bn_b': 'backbone.bn1.bias',
    'bbox_pred_w': 'bbox_head.weight',
    'bbox_pred_b': 'bbox_head.bias',
    'cls_score_w': 'cls_score.weight',
    'cls_score_b': 'cls_score.bias',
    # setting 1: plain RPN blob names
    'conv_rpn_w': 'rpn.conv.weight',
    'conv_rpn_b': 'rpn.conv.bias',
    'rpn_cls_logits_w': 'rpn.cls_logits.weight',
    'rpn_cls_logits_b': 'rpn.cls_logits.bias',
    'rpn_bbox_pred_w': 'rpn.bbox_pred.weight',
    'rpn_bbox_pred_b': 'rpn.bbox_pred.bias',
    # setting 2: FPN-style RPN blob names (shared head, fpn2 copy is used)
    'conv_rpn_fpn2_w': 'rpn.conv.weight',
    'conv_rpn_fpn2_b': 'rpn.conv.bias',
    'rpn_cls_logits_fpn2_w': 'rpn.cls_logits.weight',
    'rpn_cls_logits_fpn2_b': 'rpn.cls_logits.bias',
    'rpn_bbox_pred_fpn2_w': 'rpn.bbox_pred.weight',
    'rpn_bbox_pred_fpn2_b': 'rpn.bbox_pred.bias',
    'conv5_mask_w': 'mask_head.transposed_conv.weight',
    'conv5_mask_b': 'mask_head.transposed_conv.bias',
    'mask_fcn_logits_w': 'mask_head.fcn_logits.weight',
    'mask_fcn_logits_b': 'mask_head.fcn_logits.bias',
}


def key_caffe2torch(key):
    """Translate one Detectron/Caffe2 blob name into its PyTorch state-dict key.

    Numbered residual-block blobs (e.g. ``res2_0_branch2a_w``) are converted
    by rule; every other blob must appear in ``key_mapping_dict``. Reads the
    module-level ``args.to`` to decide the prefix for residual layers.

    Raises:
        ValueError: for a ``_branch1`` blob with an unrecognised suffix.
        KeyError: for any other blob name that matches neither scheme.
    """
    if key.startswith('res') and key[3] != '_':  # e.g. res2_0_branch2a_w ('res_conv1_*' goes via the dict)
        layer_id = int(key[3]) - 1  # caffe res2..res5 -> torch layer1..layer4
        prefix = ''
        if args.to == 'maskrcnn':
            # res5 (layer4) lives in the RoI conv head of the mask-rcnn layout,
            # the earlier stages in the backbone.
            # Was the fragile `cond and a or b` idiom; ternary is equivalent here.
            prefix = 'backbone' if layer_id < 4 else 'conv_head'
        # NOTE(review): with --to backbone the prefix is empty, so the key gets
        # a leading '.' (e.g. '.layer1.0...'); preserved as-is -- confirm the
        # downstream loader expects that.
        new_key = '{}.layer{}.{}{}'.format(prefix, layer_id, int(key[5]), key[6:])
        # e.g. layer1.0.branch2a_w, layer3.4.branch2a_bn_b
        if '_branch1' in new_key:
            # branch1 is the projection shortcut -> torch 'downsample' Sequential
            # (index 0 = conv, index 1 = batchnorm).
            new_key = new_key.replace('_branch1', '.downsample')
            if new_key.endswith('_bn_s'):
                new_key = new_key.replace('_bn_s', '.1.weight')
            elif new_key.endswith('_bn_b'):
                new_key = new_key.replace('_bn_b', '.1.bias')
            elif new_key.endswith('_w'):
                new_key = new_key.replace('_w', '.0.weight')
            elif new_key.endswith('_b'):
                new_key = new_key.replace('_b', '.0.bias')
            else:
                raise ValueError(new_key)
        elif '_branch2a' in new_key or '_branch2b' in new_key or '_branch2c' in new_key:
            # branch2{a,b,c} map to conv1/bn1 .. conv3/bn3 of the bottleneck.
            # Was a chained `and/or` expression; explicit branches are equivalent.
            if '_branch2a' in new_key:
                num_id = '1'
            elif '_branch2b' in new_key:
                num_id = '2'
            else:
                num_id = '3'
            new_key = new_key.replace('_branch2a', '').replace('_branch2b', '').replace('_branch2c', '')
            if new_key.endswith('_bn_s'):
                new_key = new_key.replace('_bn_s', '.bn{}.weight'.format(num_id))
            elif new_key.endswith('_bn_b'):
                new_key = new_key.replace('_bn_b', '.bn{}.bias'.format(num_id))
            elif new_key.endswith('_w'):
                new_key = new_key.replace('_w', '.conv{}.weight'.format(num_id))
            elif new_key.endswith('_b'):
                new_key = new_key.replace('_b', '.conv{}.bias'.format(num_id))
            else:
                print(new_key, 'cannot be verified')
                raise KeyError(new_key)
    elif key in key_mapping_dict:
        new_key = key_mapping_dict[key]
    else:
        print(key, 'cannot be verified')
        raise KeyError(key)

    print(key, '-->', new_key)
    return new_key


def is_excluded_key(key):
    """Return True for blobs that should be dropped from the converted model:
    SGD momentum buffers (not needed for eval) and the ImageNet fc1000 head."""
    is_momentum_buffer = key.endswith('_momentum')
    is_imagenet_fc = key.startswith('fc1000')
    return is_momentum_buffer or is_imagenet_fc


# Walk the caffe blobs in order, skip excluded ones, and rebuild the state
# dict under the translated PyTorch key names.
converted = OrderedDict()
for blob_name, blob in state.items():
    if is_excluded_key(blob_name):
        continue
    converted[key_caffe2torch(blob_name)] = torch.Tensor(blob)
state = converted
torch.save(obj=state, f=args.output)
print('Conversion Done.')

# NOTE(review): dead code deliberately parked inside a string literal -- legacy
# loader snippets (1up4convs mask head, FPN laterals, 2-layer MLP head) from a
# different model class. It is never executed by this script; kept only as a
# reference for extending the converter.
'''
if self.use_mask_head:
    if self.mask_head_type == '1up4convs':
        print('-> loading 1up4convs mask head weights')
        self.mask_head.conv_head.fcn1.weight.data = torch.FloatTensor(caffe_data['_[mask]_fcn1_w'])
        self.mask_head.conv_head.fcn1.bias.data = torch.FloatTensor(caffe_data['_[mask]_fcn1_b'])
        self.mask_head.conv_head.fcn2.weight.data = torch.FloatTensor(caffe_data['_[mask]_fcn2_w'])
        self.mask_head.conv_head.fcn2.bias.data = torch.FloatTensor(caffe_data['_[mask]_fcn2_b'])
        self.mask_head.conv_head.fcn3.weight.data = torch.FloatTensor(caffe_data['_[mask]_fcn3_w'])
        self.mask_head.conv_head.fcn3.bias.data = torch.FloatTensor(caffe_data['_[mask]_fcn3_b'])
        self.mask_head.conv_head.fcn4.weight.data = torch.FloatTensor(caffe_data['_[mask]_fcn4_w'])
        self.mask_head.conv_head.fcn4.bias.data = torch.FloatTensor(caffe_data['_[mask]_fcn4_b'])
# load FPN weights
if self.use_fpn_body:
    print('-> loading FPN lateral weights')
    for i in range(len(self.conv_body.fpn_layers)):
        l = self.conv_body.fpn_layers[i]
        # get name of last conv layer of each ResNet block which is used for FPN
        k_caffe = parse_th_to_caffe2(
            (l + '.' + list(getattr(self.model, l).state_dict().keys())[-1]).split('.'))
        k_caffe = k_caffe[:k_caffe.rfind("_")]
        if i < len(self.conv_body.fpn_layers) - 1:
            suffix = '_sum_lateral'
        else:
            suffix = '_sum'
        self.conv_body.fpn_lateral[i].weight.data = torch.FloatTensor(
            caffe_data['fpn_inner_' + k_caffe + suffix + '_w'])
        self.conv_body.fpn_lateral[i].bias.data = torch.FloatTensor(
            caffe_data['fpn_inner_' + k_caffe + suffix + '_b'])
        self.conv_body.fpn_output[i].weight.data = torch.FloatTensor(
            caffe_data['fpn_' + k_caffe + '_sum_w'])
        self.conv_body.fpn_output[i].bias.data = torch.FloatTensor(caffe_data['fpn_' + k_caffe + '_sum_b'])
# load 2 layer mlp weights
if self.use_two_layer_mlp_head:
    print('-> loading two layer mlp conv head...')
    self.conv_head.fc6.weight.data = torch.FloatTensor(caffe_data['fc6_w'])
    self.conv_head.fc6.bias.data = torch.FloatTensor(caffe_data['fc6_b'])
    self.conv_head.fc7.weight.data = torch.FloatTensor(caffe_data['fc7_w'])
    self.conv_head.fc7.bias.data = torch.FloatTensor(caffe_data['fc7_b'])
'''