#!/usr/bin/env python
# encoding: utf-8
'''
@author: wangjianrong
@software: PyCharm
@file: resize_model.py
@time: 2019/9/2 13:26
@desc: 对coco上训练的模型进行修改，根据不同的num_classes和num_anchors修改模型参数size，使finetune能够正常加载预训练模型
        该文件针对cascade_rcnn进行修改
'''

import torch
import torch.nn as nn
import os

# Checkpoint to adapt: cascade R-CNN R50-FPN trained on COCO (80 classes + bg, 3 anchors).
check_file = 'weights/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth'
check_name = os.path.basename(check_file)
check_name = check_name[:check_name.rfind('.')]  # strip the .pth extension
num_classes = 3  # including bg, e.g. 81 for coco, 2 for smoke and 3 for smoke_fire
num_anchors = 3


def resize_state_dict(state_dict, num_classes, num_anchors,
                      num_stages=3, in_channels=1024, feat_channels=256):
    """Resize the cascade R-CNN head tensors in ``state_dict`` in place.

    Adapts a COCO-pretrained checkpoint so that finetuning with a different
    ``num_classes`` / ``num_anchors`` can load the weights without shape
    mismatches.

    Args:
        state_dict (dict): mapping of parameter name -> torch.Tensor, as found
            under the ``'state_dict'`` key of an mmdetection checkpoint.
        num_classes (int): number of classes including background.
        num_anchors (int): anchors per feature-map location in the RPN.
        num_stages (int): number of cascade bbox heads (3 for cascade R-CNN).
        in_channels (int): input features of the fc_cls layers.
        feat_channels (int): RPN conv feature channels.

    Returns:
        dict: the same ``state_dict`` object, modified in place.
    """
    # NOTE: Tensor.resize_() does NOT preserve or initialize values in any
    # newly allocated memory -- grown regions contain arbitrary (often huge)
    # numbers that explode gradients and drive the loss to inf. Every resized
    # tensor must therefore be re-initialized like a freshly created layer.
    for i in range(num_stages):
        w = state_dict['roi_head.bbox_head.{}.fc_cls.weight'.format(i)]
        b = state_dict['roi_head.bbox_head.{}.fc_cls.bias'.format(i)]
        w.resize_(num_classes, in_channels)
        b.resize_(num_classes)
        nn.init.normal_(w, mean=0, std=0.01)  # standard fc_cls init
        nn.init.constant_(b, 0)

    state_dict['rpn_head.rpn_cls.weight'].resize_(num_anchors, feat_channels, 1, 1)
    state_dict['rpn_head.rpn_cls.bias'].resize_(num_anchors)
    state_dict['rpn_head.rpn_reg.weight'].resize_(4 * num_anchors, feat_channels, 1, 1)
    state_dict['rpn_head.rpn_reg.bias'].resize_(4 * num_anchors)
    nn.init.kaiming_normal_(state_dict['rpn_head.rpn_cls.weight'],
                            a=0, mode='fan_out', nonlinearity='relu')
    nn.init.constant_(state_dict['rpn_head.rpn_cls.bias'], 0)
    nn.init.kaiming_normal_(state_dict['rpn_head.rpn_reg.weight'],
                            a=0, mode='fan_out', nonlinearity='relu')
    nn.init.constant_(state_dict['rpn_head.rpn_reg.bias'], 0)
    return state_dict


if __name__ == '__main__':
    # map_location='cpu' lets the script run on a machine without the GPU the
    # checkpoint was saved from.
    pre_weights = torch.load(check_file, map_location='cpu')['state_dict']
    for name, param in pre_weights.items():
        print(name, param.shape)

    resize_state_dict(pre_weights, num_classes, num_anchors)

    # Sanity check: re-initialized tensors should contain only small values.
    for key in ('rpn_head.rpn_cls.weight', 'rpn_head.rpn_cls.bias',
                'rpn_head.rpn_reg.weight', 'rpn_head.rpn_reg.bias'):
        print(pre_weights[key].max())
        print(pre_weights[key].min())

    for name, param in pre_weights.items():
        print(name, param.shape)

    torch.save(pre_weights, 'weights/{}_{}x{}.pth'.format(check_name, num_classes, num_anchors))