from torch.autograd import Variable
import torch.onnx
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision import transforms
from PIL import Image
from torchvision.models.detection.rpn import AnchorGenerator
import io
import sys
import numpy as np


# Custom anchor configuration: five FPN pyramid levels, two aspect ratios per level.
anchor_sizes = ((16,), (32,), (48,), (64,), (96,))
aspect_ratios = ((1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)

# Build a 2-class (background + object) Faster R-CNN with a ResNet-50 FPN
# backbone. All keyword arguments are forwarded unchanged to
# fasterrcnn_resnet50_fpn; named here in one dict for readability.
detector_kwargs = dict(
    num_classes=2,
    pretrained=False,
    # Input preprocessing done by the model's internal transform.
    min_size=128,
    max_size=1280,
    image_mean=(0.485, 0.456, 0.406),
    image_std=(0.229, 0.224, 0.225),
    # RPN parameters
    rpn_anchor_generator=rpn_anchor_generator,
    rpn_head=None,
    rpn_pre_nms_top_n_train=2000,
    rpn_pre_nms_top_n_test=1000,
    rpn_post_nms_top_n_train=2000,
    rpn_post_nms_top_n_test=1000,
    rpn_nms_thresh=0.7,
    rpn_fg_iou_thresh=0.7,
    rpn_bg_iou_thresh=0.3,
    rpn_batch_size_per_image=256,
    rpn_positive_fraction=0.5,
    # Box (ROI head) parameters
    box_roi_pool=None,
    box_head=None,
    box_predictor=None,
    box_score_thresh=0.05,
    box_nms_thresh=0.5,
    box_detections_per_img=100,
    box_fg_iou_thresh=0.5,
    box_bg_iou_thresh=0.5,
    box_batch_size_per_image=512,
    box_positive_fraction=0.25,
    bbox_reg_weights=None,
)
model = torchvision.models.detection.__dict__["fasterrcnn_resnet50_fpn"](**detector_kwargs)



# Load the test image as RGB. ToTensor yields a 3-D (C, H, W) float tensor in
# [0, 1]; the detection model expects a *list* of such 3-D tensors (no batch
# dimension), so the tensor is kept un-batched here.
image = Image.open('2019_05_03_23_36_42_027_roi_crop.jpg').convert("RGB")
transformation = transforms.Compose([
    transforms.ToTensor(),
    ])

image_tensor = transformation(image)  # must stay 3-D


# model.cuda()
model.load_state_dict(torch.load('model/2020-03-03-17-12-43/model_11.pth',)["model"]) #map_location='cpu'
model.eval()


# Run one forward pass first so all lazily-created buffers exist before tracing.
out = model([image_tensor])

input_names = ["image_tensor"]
output_names = ["boxes", "labels", "scores"]
# Mark the image's spatial dims (H, W) and the per-detection dim as dynamic so
# the exported graph accepts variable image sizes and detection counts.
dynamic_axes = {
    'image_tensor': [1, 2],
    'boxes': [0],
    'scores': [0],
    'labels': [0],
    }

# NOTE(review): torchvision detection models take a *list* of 3-D tensors;
# passing `args=[image_tensor]` relies on torch.onnx.export coercing a
# top-level list into the argument tuple — confirm against the docs of the
# installed torch version (the documented form is a tuple).
torch.onnx.export(model,                       # model to trace
                  args=[image_tensor],         # example model input
                  f="faster_rcnn.onnx",        # output file path
                  input_names=input_names,     # graph input node names
                  output_names=output_names,   # graph output node names
                  dynamic_axes=dynamic_axes,
                  verbose=True,
                  do_constant_folding=True,
                  opset_version=11)            # opset >= 11 needed for detection ops

## check
## Check: verify the exported graph.
import onnx

# Use a distinct name so the PyTorch `model` above is not shadowed.
onnx_model = onnx.load("faster_rcnn.onnx")

# Check that the IR is well formed.
onnx.checker.check_model(onnx_model)

# Print a human-readable representation of the graph.
print(onnx.helper.printable_graph(onnx_model.graph))