# -*- coding: utf-8 -*-
# File  : Fast-RCNN.py
# Author: Pengwenyu
# Date  : 2019/8/23

#https://blog.csdn.net/zw__chen/article/details/82706019
from torchvision import datasets, transforms,models
import torch
from torch import *
import numpy as np
import cv2
# Convert a torch tensor to a numpy array for display with OpenCV as follows:
# image=torch.zeros((1,3,800,800)).float().squeeze().permute(1,2,0).numpy()
# cv2.imshow("123",image)
# cv2.waitKey()
# Dummy 800x800 input tensor used to drive the anchor-generation walkthrough.
data = torch.zeros(1, 3, 800, 800, dtype=torch.float)
# Ground-truth boxes in [y1, x1, y2, x2] format.
bbox = torch.FloatTensor([[20, 30, 400, 500], [300, 400, 500, 600]])
# Canvas for visualising anchor centres (assumes black.jpg exists next to
# this script — verify, cv2.imread returns None otherwise).
image = cv2.imread("black.jpg")
# Class labels of the two ground-truth boxes.
labels = torch.LongTensor([6, 8])
# VGG16's feature map downsamples the input by a factor of 16.
sub_sample = 16

# Build a dummy image (the old "volatile=False" note predates modern torch,
# where volatile no longer exists).
dummy_img = torch.zeros(1, 3, 800, 800, dtype=torch.float)

# Enumerate all feature layers of a pretrained VGG16 backbone.
model = models.vgg16(pretrained=True)
fe = list(model.features)
# print(fe)  # length is 15



# Push the dummy image through VGG16 layer by layer, keeping every layer up
# to (but not including) the first one that shrinks the feature map below
# 800/16 = 50 pixels.  The retained layers form the shared feature extractor.
req_features = []
k = dummy_img.clone()
for layer in fe:
    k = layer(k)
    if k.size(2) < 800 // 16:
        break
    req_features.append(layer)
    out_channels = k.size(1)

# Wrap the retained layers into one sequential module and run the input
# through it to obtain the feature map.
faster_rcnn_fe_extractor = torch.nn.Sequential(*req_features)
out_map = faster_rcnn_fe_extractor(data)
# print(out_map.size())


# Anchor template: 9 boxes (3 aspect ratios x 3 scales) centred on the first
# feature-map cell, stored in [y1, x1, y2, x2] format.
ratios = [0.5, 1, 2]
anchor_scales = [8, 16, 32]

anchor_base = np.zeros((len(ratios) * len(anchor_scales), 4), dtype=np.float32)

ctr_y = sub_sample / 2
ctr_x = sub_sample / 2
for i, ratio in enumerate(ratios):
    for j, scale in enumerate(anchor_scales):
        # Height/width of this ratio/scale combination (area = (16*scale)^2).
        h = sub_sample * scale * np.sqrt(ratio)
        w = sub_sample * scale * np.sqrt(1. / ratio)
        index = i * len(anchor_scales) + j
        anchor_base[index] = [ctr_y - h / 2.,
                              ctr_x - w / 2.,
                              ctr_y + h / 2.,
                              ctr_x + w / 2.]

print(anchor_base)


# 2. Generate an anchor centre for every feature-map location.
fe_size = 800 / 16
ctr_x = np.arange(16, (fe_size + 1) * 16, 16)
ctr_y = np.arange(16, (fe_size + 1) * 16, 16)

# One (y, x) centre per feature-map cell; x varies slowest (outer loop).
ctr = np.zeros((len(ctr_x) * len(ctr_y), 2), dtype=np.float32)
index = 0
for cx in ctr_x:
    for cy in ctr_y:
        ctr[index, 0] = cy - 8
        ctr[index, 1] = cx - 8
        index += 1
        # Mark each centre on the visualisation image.
        cv2.circle(image, (int(cx - 8), int(cy - 8)), 1, (255, 0, 0), 1)



# Enumerate the 9 template anchors at every centre.
# FIX: the anchor count was hard-coded to 22500, which only holds for an
# 800x800 input (50*50 centres * 9 anchors); derive it from the data so the
# code survives a different image or feature-map size.
anchors = np.zeros((len(ctr) * len(ratios) * len(anchor_scales), 4))
index = 0
for c in ctr:
    ctr_y, ctr_x = c
    for i in range(len(ratios)):
        for j in range(len(anchor_scales)):
            h = sub_sample * anchor_scales[j] * np.sqrt(ratios[i])
            w = sub_sample * anchor_scales[j] * np.sqrt(1./ratios[i])
            # [y1, x1, y2, x2] corners around this centre.
            anchors[index, 0] = ctr_y - h / 2.
            anchors[index, 1] = ctr_x - w / 2.
            anchors[index, 2] = ctr_y + h / 2.
            anchors[index, 3] = ctr_x + w / 2.
            index += 1
print("anchors.shape:", anchors.shape)
#
# cv2.imshow("image", image)
# cv2.waitKey()
# Compare anchors against the ground-truth boxes by IoU:
#   IoU >= 0.7 -> positive sample, IoU < 0.3 -> negative sample,
#   anything in between is ignored.
bbox = np.asarray([[20, 30, 400, 500], [300, 400, 500, 600]],
                  dtype=np.float32)  # [y1, x1, y2, x2] format
labels = np.asarray([6, 8], dtype=np.int8)  # 0 represents background

# Keep only anchors that lie fully inside the 800x800 image.
inside_mask = ((anchors[:, 0] >= 0) &
               (anchors[:, 1] >= 0) &
               (anchors[:, 2] <= 800) &
               (anchors[:, 3] <= 800))
index_inside = np.where(inside_mask)[0]
print("index_inside.shape:", index_inside.shape)


# Per-anchor label over the inside anchors: -1 = ignore (the default),
# 0 = negative, 1 = positive.
label = np.full((len(index_inside),), -1, dtype=np.int32)
print(label.shape)

valid_anchor_boxes = anchors[index_inside]
print(valid_anchor_boxes.shape)

# IoU of every valid anchor against each of the two ground-truth boxes.
ious = np.zeros((len(valid_anchor_boxes), 2), dtype=np.float32)
print(bbox)
print(bbox)
# Pairwise IoU: intersection area over union area for every
# (valid anchor, ground-truth box) pair.
for num1, anchor in enumerate(valid_anchor_boxes):
    ya1, xa1, ya2, xa2 = anchor
    anchor_area = (ya2 - ya1) * (xa2 - xa1)
    for num2, gt in enumerate(bbox):
        yb1, xb1, yb2, xb2 = gt
        box_area = (yb2 - yb1) * (xb2 - xb1)

        # Intersection rectangle; it is empty when the corners cross over.
        inter_x1 = max(xa1, xb1)
        inter_y1 = max(ya1, yb1)
        inter_x2 = min(xa2, xb2)
        inter_y2 = min(ya2, yb2)
        if inter_x1 < inter_x2 and inter_y1 < inter_y2:
            iter_area = (inter_y2 - inter_y1) * (inter_x2 - inter_x1)
            iou = iter_area / (anchor_area + box_area - iter_area)
        else:
            iou = 0.
        ious[num1, num2] = iou
print(ious.shape)

# Best anchor for each ground-truth box (column-wise argmax) ...
gt_argmax_ious = np.argmax(ious, axis=0)
print(gt_argmax_ious)
gt_max_ious = ious[gt_argmax_ious, np.arange(ious.shape[1])]
print(gt_max_ious)

# ... and the best-matching ground-truth box for each anchor (row-wise).
argmax_ious = np.argmax(ious, axis=1)
print(argmax_ious.shape)
print(argmax_ious)
max_ious = ious[np.arange(ious.shape[0]), argmax_ious]
print(max_ious)

# Every anchor whose IoU ties a per-ground-truth maximum (ties included).
gt_argmax_ious = np.where(ious == gt_max_ious)[0]
print(gt_argmax_ious)

# IoU thresholds for positive/negative assignment (Faster R-CNN paper).
pos_iou_threshold = 0.7
neg_iou_threshold = 0.3

# Negatives first, then positives, so an anchor that is the best match for
# some ground-truth box always ends up positive.
label[max_ious < neg_iou_threshold] = 0
label[gt_argmax_ious] = 1
label[max_ious >= pos_iou_threshold] = 1

# Sample at most 256 anchors per image, at most half of them positive.
pos_ratio = 0.5
n_sample = 256
# FIX: keep n_pos an int — np.random.choice rejects a float `size`.
n_pos = int(pos_ratio * n_sample)

pos_index = np.where(label == 1)[0]
if len(pos_index) > n_pos:
    disable_index = np.random.choice(pos_index,
                                     size=(len(pos_index) - n_pos),
                                     replace=False)
    # FIX: was `label[disable_index] == -1` — a no-op comparison; the
    # surplus positives must actually be disabled.
    label[disable_index] = -1

# FIX: was `n_sample * np.sum(label == 1)` — the negative budget is what
# remains of the 256 samples after the positives are taken, so subtract.
n_neg = n_sample - np.sum(label == 1)
neg_index = np.where(label == 0)[0]
if len(neg_index) > n_neg:
    disable_index = np.random.choice(neg_index,
                                     size=(len(neg_index) - n_neg),
                                     replace=False)
    label[disable_index] = -1

# Regression target for each valid anchor is the ground-truth box it
# overlaps most.
max_iou_bbox = bbox[argmax_ious]
print(max_iou_bbox)

# Re-parameterise both anchor and target boxes as centre + size.
height = valid_anchor_boxes[:, 2] - valid_anchor_boxes[:, 0]
width = valid_anchor_boxes[:, 3] - valid_anchor_boxes[:, 1]
ctr_y = valid_anchor_boxes[:, 0] + 0.5 * height
ctr_x = valid_anchor_boxes[:, 1] + 0.5 * width

base_height = max_iou_bbox[:, 2] - max_iou_bbox[:, 0]
base_width = max_iou_bbox[:, 3] - max_iou_bbox[:, 1]
base_ctr_y = max_iou_bbox[:, 0] + 0.5 * base_height
base_ctr_x = max_iou_bbox[:, 1] + 0.5 * base_width

# Clamp anchor sizes away from zero so the divisions and logs stay finite.
eps = np.finfo(height.dtype).eps
height = np.maximum(height, eps)
width = np.maximum(width, eps)

# Standard (dy, dx, dh, dw) box-regression targets, one row per anchor.
dy = (base_ctr_y - ctr_y) / height
dx = (base_ctr_x - ctr_x) / width
dh = np.log(base_height / height)
dw = np.log(base_width / width)
anchor_locs = np.stack((dy, dx, dh, dw), axis=1)
print(anchor_locs)

# Scatter the inside-anchor labels and targets back onto the full anchor
# set: anchors outside the image keep label -1 and zero regression targets.
anchor_labels = np.full((len(anchors),), -1, dtype=label.dtype)
anchor_labels[index_inside] = label
print(anchor_labels.shape)

anchor_locations = np.zeros((len(anchors),) + anchors.shape[1:],
                            dtype=anchor_locs.dtype)
anchor_locations[index_inside, :] = anchor_locs
print(anchor_locations.shape)