# -*- coding: utf-8 -*-
# File  : RPN.py
# Author: Pengwenyu
# Date  : 2019/9/3.

import torch.nn as nn
from torchvision import datasets, transforms,models
import torch
from torch import *
import numpy as np
import cv2
# The following shows how to convert a torch tensor to numpy for display with OpenCV:
# image=torch.zeros((1,3,800,800)).float().squeeze().permute(1,2,0).numpy()
# cv2.imshow("123",image)
# cv2.waitKey()
# Dummy input image tensor of shape (N, C, H, W) = (1, 3, 800, 800).
data = torch.zeros(1, 3, 800, 800, dtype=torch.float32)
# Ground-truth boxes, one row per object, in [y1, x1, y2, x2] format.
bbox = torch.FloatTensor([[20, 30, 400, 500], [300, 400, 500, 600]])
# image = data.squeeze().permute(1,2,0).numpy()
# Background image loaded from disk for visualization (BGR uint8, or None
# if the file is missing -- cv2.imread does not raise).
image = cv2.imread("black.jpg")
# print(data,image)
# Class labels for the two ground-truth boxes (boxes use [y1,x1,y2,x2] format).
labels = torch.tensor([6, 8], dtype=torch.long)
# Total stride of the feature extractor: an 800x800 input maps to 50x50 features.
sub_sample = 16

# Create a dummy 800x800 image to probe the backbone's output sizes.
dummy_img = torch.zeros(1, 3, 800, 800, dtype=torch.float32)
# print(dummy_img)

# Load pretrained VGG16 and list the individual layers of its
# convolutional feature extractor.
# NOTE(review): pretrained=True downloads weights on first use.
model = models.vgg16(pretrained=True)
fe = [layer for layer in model.features]
# print(fe)



# Run the dummy image through the layers one at a time and keep every
# layer up to the point where the spatial size would drop below
# 800/16 = 50; out_channels ends up holding the channel count of the
# last kept layer's output.
req_features = []
feat = dummy_img.clone()
for layer in fe:
    feat = layer(feat)
    if feat.size(2) < 800 // 16:
        break
    req_features.append(layer)
    out_channels = feat.size(1)
# print(len(req_features))
# print(out_channels)

# Wrap the selected layers into a single Sequential module and compute
# the backbone feature map for the input tensor.
faster_rcnn_fe_extractor = nn.Sequential(*req_features)
# data=torch.Tensor(image)
out_map = faster_rcnn_fe_extractor(data)



# RPN head: a shared 3x3 conv followed by two sibling 1x1 convs that
# predict, per feature-map location, box offsets (4 values per anchor)
# and objectness scores (2 values per anchor).
mid_channels = 512
in_channels = 512   # channel count produced by the VGG16 feature extractor
n_anchor = 9        # anchors per feature-map location

conv1 = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
reg_layer = nn.Conv2d(mid_channels, n_anchor * 4, 1, 1, 0)
cls_layer = nn.Conv2d(mid_channels, n_anchor * 2, 1, 1, 0)

# Initialize every layer the same way: weights ~ N(0, 0.01), biases zero.
for layer in (conv1, reg_layer, cls_layer):
    layer.weight.data.normal_(0, 0.01)
    layer.bias.data.zero_()

# Forward pass of the RPN head over the backbone feature map.
hidden = conv1(out_map)
pred_anchor_locs = reg_layer(hidden)  # (1, 36, H, W): 9 anchors * 4 offsets
pred_cls_scores = cls_layer(hidden)   # (1, 18, H, W): 9 anchors * 2 classes

print(pred_cls_scores.shape, pred_anchor_locs.shape)

# Reshape the raw RPN outputs into per-anchor form.
#
# pred_anchor_locs: (1, 36, H, W) -> (1, H*W*9, 4) box regression offsets.
pred_anchor_locs = pred_anchor_locs.permute(0, 2, 3, 1).contiguous().view(1, -1, 4)
print(pred_anchor_locs.shape)
# Out: torch.Size([1, 22500, 4])

# pred_cls_scores: (1, 18, H, W) -> (1, H, W, 18), channels last.
pred_cls_scores = pred_cls_scores.permute(0, 2, 3, 1).contiguous()
# BUGFIX: print the shape, not the whole tensor -- the expected output
# noted below is a shape.
print(pred_cls_scores.shape)
# Out: torch.Size([1, 50, 50, 18])

# Objectness score per anchor, taken from index 1 of each (bg, fg) pair.
# Feature-map height/width are read from the tensor instead of being
# hard-coded to 50x50, so this works for any input resolution.
fm_h, fm_w = pred_cls_scores.shape[1], pred_cls_scores.shape[2]
objectness_score = pred_cls_scores.view(1, fm_h, fm_w, n_anchor, 2)[:, :, :, :, 1].contiguous().view(1, -1)
print(objectness_score.shape)
# Out: torch.Size([1, 22500])

# All (bg, fg) score pairs flattened: (1, H*W*9, 2).
pred_cls_scores = pred_cls_scores.view(1, -1, 2)
print(pred_cls_scores.shape)
# Out: torch.Size([1, 22500, 2])

