from model_utils.yolov1_backbone import YOLOv1Net
from proposal_utils.yolov1_bbox import dst_bbox
from proposal_utils.bbox import corners2centers
import torch
from torch import nn
import cv2
import numpy as np

# --- Single-image overfitting demo: build the model, the input tensor,
# --- and the regression target for a 2-class YOLOv1 net.
device = torch.device("cuda:0")
model = YOLOv1Net(2).to(device)

# Read the demo image and note its native size; the network expects 448x448.
image = cv2.imread("./test_data/catdog.jpg")
img_h, img_w = image.shape[:2]

# Two ground-truth boxes in normalized corner form (x1, y1, x2, y2),
# rescaled into the 448x448 network frame.
rescale = torch.tensor([[448 / img_w, 448 / img_h, 448 / img_w, 448 / img_h]])
corners = torch.tensor([[0.1, 0.08, 0.52, 0.92],
                        [0.55, 0.2, 0.9, 0.88]])
ground_truth = corners * rescale

# Pixel-space boxes, kept for the (disabled) visualization below.
anchors = ground_truth * torch.tensor([[img_w, img_h, img_w, img_h]])

# Resize to the network input size and convert HWC uint8 -> NCHW tensor.
image = torch.from_numpy(np.expand_dims(cv2.resize(image, (448, 448)), 0)).permute([0, 3, 1, 2])

# for anchor in anchors.int():
#     x1, y1, x2, y2 = anchor
#     cv2.rectangle(image, (x1.item(), y1.item()), (x2.item(), y2.item()), color=(0, 0, 255), thickness=2)
#
# cv2.imshow("image", image)
# cv2.waitKey(0)

# Convert corner boxes to center form and encode them onto the S x S grid.
ground_truth = corners2centers(ground_truth)
dst_bbox = dst_bbox(ground_truth, 2, 2)  # NOTE: rebinding shadows the imported function
criterion = nn.MSELoss()


def loss_bbox(box1, box2):
    """YOLOv1-style loss between predicted and target grid tensors.

    Both tensors are assumed to share shape (N, S, S, 2*5 + C) with the last
    axis laid out as [x, y, w, h, conf, x, y, w, h, conf, class_0, class_1]
    (matches the index sets used below; C = 2 here) — TODO confirm against
    ``dst_bbox``'s encoder.

    Returns a scalar tensor: 5 * localization + confidence + classification,
    each term a plain MSE mean (λcoord = 5 per the YOLOv1 paper).
    """
    # Localization error, part 1: box-center offsets. The original code
    # omitted these, so box centers were never penalized.
    loss_xy = torch.mean((box1[:, :, :, [0, 1, 5, 6]] - box2[:, :, :, [0, 1, 5, 6]]) ** 2)
    # Localization error, part 2: sqrt of width/height (sqrt makes errors on
    # small boxes weigh more). clamp(min=0) because raw network outputs can
    # go negative, and sqrt of a negative value yields NaN and poisons the
    # whole loss.
    box1_wh = box1[:, :, :, [2, 3, 7, 8]].clamp(min=0)
    box2_wh = box2[:, :, :, [2, 3, 7, 8]].clamp(min=0)
    loss_loc = loss_xy + torch.mean((box1_wh.sqrt() - box2_wh.sqrt()) ** 2)
    # Confidence error (objectness of both predictors).
    loss_conf = torch.mean((box1[:, :, :, [4, 9]] - box2[:, :, :, [4, 9]]) ** 2)
    # Classification error (last C channels).
    loss_cls = torch.mean((box1[:, :, :, -2:] - box2[:, :, :, -2:]) ** 2)
    return 5 * loss_loc + loss_conf + loss_cls


# Plain SGD with momentum, per the YOLOv1 training setup.
sgd = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
model.train()
epochs = 10

# Hoist the host->device transfers out of the loop: neither the input image
# nor the regression target changes between epochs, so re-copying them every
# iteration was pure overhead.
inputs = image.to(device).float()
targets = dst_bbox.to(device)

for epoch in range(epochs):
    sgd.zero_grad()
    predict_bbox = model(inputs)
    loss = loss_bbox(predict_bbox, targets)
    loss.backward()
    # .item() prints the scalar value rather than a tensor repr with grad_fn.
    print(loss.item())
    sgd.step()


