import cv2
import copy
import torch
from device import DEVICE
from models.yolo import YoloBody
from transform_tools import TransformTools, create_mask_image
from torchvision import transforms
import torch.nn.functional as F

# NOTE: a previous `model = torch.load("senet.pth")` here was removed — the
# loaded checkpoint was never used before `model` was rebound to a freshly
# initialized YoloBody further down, so the load was pure wasted disk I/O.

# Shared transform helper used by make_image_data below.
tt = TransformTools()


def make_image_data(img_file, crop_region, resize_h, resize_w):
    """Load an image and build a (source tensor, target mask tensor) pair.

    Args:
        img_file: path to an image readable by OpenCV.
        crop_region: (y1, y2, x1, x2) region of interest, expressed in the
            coordinate space of the tensor produced by ``tt.resize_transform``.
        resize_h: output height of the target mask tensor.
        resize_w: output width of the target mask tensor.

    Returns:
        A tuple ``(src_img_tensor, tgt_img_tensor)``: the source image after
        ``tt.resize_transform``, and the mask produced by ``create_mask_image``
        at ``(resize_h, resize_w)`` resolution.

    Raises:
        FileNotFoundError: if the image file cannot be read.
    """
    img = cv2.imread(img_file, cv2.IMREAD_COLOR)
    if img is None:
        # cv2.imread silently returns None for missing/unreadable files;
        # fail loudly here instead of crashing later inside img2tensor.
        raise FileNotFoundError(f"could not read image: {img_file}")
    src_img_tensor = tt.img2tensor(img)

    # Independent copy so the target-specific resize below does not touch
    # the returned source tensor (.clone() is the idiomatic tensor copy,
    # cheaper than copy.deepcopy).
    tgt_img_tensor = src_img_tensor.clone()
    # Resize both tensors with the shared transform object.
    src_img_tensor = tt.resize_transform(src_img_tensor)
    tgt_img_tensor = tt.resize_transform(tgt_img_tensor)

    # Rescale the crop coordinates from the intermediate tensor's (C, H, W)
    # size to the final (resize_h, resize_w) grid.
    scale_y = resize_h / tgt_img_tensor.size()[1]
    scale_x = resize_w / tgt_img_tensor.size()[2]
    crop_region = [
        int(crop_region[0] * scale_y),
        int(crop_region[1] * scale_y),
        int(crop_region[2] * scale_x),
        int(crop_region[3] * scale_x),
    ]

    resize_target_transform = transforms.Resize((resize_h, resize_w))
    tgt_img_tensor = resize_target_transform(tgt_img_tensor)

    # crop_region order is y1, y2, x1, x2.
    tgt_img_tensor = create_mask_image(tgt_img_tensor, crop_region)
    return src_img_tensor, tgt_img_tensor


# Target spatial size (height, width) for the mask tensors.
resize_h, resize_w = 128, 96
# Build (source, target-mask) pairs for four sample images, all with the
# same crop region (y1, y2, x1, x2).
# NOTE(review): only s2/t2 feed the model below; s1/s3/s4 and t1/t3/t4 are
# loaded but never used — confirm the extra loads are intentional.
s1, t1 = make_image_data("1.jpg", (200, 430, 500, 650), resize_h, resize_w)
s2, t2 = make_image_data("2.jpg", (200, 430, 500, 650), resize_h, resize_w)
s3, t3 = make_image_data("3.jpg", (200, 430, 500, 650), resize_h, resize_w)
s4, t4 = make_image_data("4.jpg", (200, 430, 500, 650), resize_h, resize_w)

# Add a batch dimension and move the source tensor to the compute device.
batch_src_img_tensor = s2.unsqueeze(0).to(DEVICE)

# NOTE(review): `model` is bound here to a freshly initialized YoloBody —
# any previously loaded checkpoint weights are not used by this forward pass.
model = YoloBody().to(DEVICE)
output = model(batch_src_img_tensor)

# Compare the network output against the target mask on the CPU and print
# the scalar MSE loss.
loss_fn = torch.nn.MSELoss()
t2 = t2.unsqueeze(0)
print(loss_fn(t2, output.to("cpu")))

# image2 = torch.zeros(1, 1, 1024, 768)
# Inspect a sub-window of the output; assumes output is (N, C, H, W) with
# H >= 50 and W >= 63 — TODO confirm against YoloBody's output shape.
print(output[:, :, 25:50, 37:63])
# src_img = tt.tensor2img(output[0].to("cpu"))
# src_img.show()
