from torch import nn
from torchvision import transforms
import torch
import copy


def create_mask_image(mask_image_tensor, crop_region):
    """Zero out everything outside ``crop_region``, keeping the region's values.

    Mutates ``mask_image_tensor`` in place and also returns it (same object).

    Args:
        mask_image_tensor: image tensor, assumed shape (C, H, W) — TODO confirm
            against callers.
        crop_region: 4-sequence ``(start_height, end_height, start_width,
            end_width)`` delimiting the box to keep.

    Returns:
        The same tensor, with all pixels outside the box set to 0.
    """
    start_height, end_height, start_width, end_width = crop_region

    # Save the box, zero the whole tensor in place, then restore the box.
    # NOTE: the previous implementation used -1 as a sentinel (fill box with
    # -1, zero every element != -1, write the box back).  That failed to zero
    # any pixel OUTSIDE the box whose value was already exactly -1 — e.g.
    # black pixels after Normalize(mean=0.5, std=0.5).  This version has no
    # sentinel and therefore no such collision.
    region = mask_image_tensor[:, start_height:end_height, start_width:end_width].clone()
    mask_image_tensor.zero_()
    mask_image_tensor[:, start_height:end_height, start_width:end_width] = region
    return mask_image_tensor


class ChannelReorder(nn.Module):
    """Swap the first and third entries of dim 0 (e.g. BGR <-> RGB channels)."""

    def forward(self, x):
        channel_order = [2, 1, 0]
        return x[channel_order, :, :]


class TransformTools(object):
    """Bundles the image<->tensor transforms used by this module."""

    def __init__(self):
        self.mean = [0.5, 0.5, 0.5]
        self.std = [0.5, 0.5, 0.5]
        self.h = 1024
        self.w = 768

        # PIL image -> tensor normalized with (mean, std) above.
        self.img2tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=self.mean, std=self.std),
        ])

        # Inverse of the normalization: x_orig = (x - (-mean/std)) / (1/std).
        inverse_mean = [-m / s for m, s in zip(self.mean, self.std)]
        inverse_std = [1 / s for s in self.std]

        # Tensor -> PIL image: reorder channels, undo normalization, convert.
        self.tensor2img = transforms.Compose([
            ChannelReorder(),
            transforms.Normalize(mean=inverse_mean, std=inverse_std),
            transforms.ToPILImage(),
        ])

        # Resize transform to the fixed working resolution (h, w).
        self.resize_transform = transforms.Resize((self.h, self.w))
