"""
数据增强
具体参考的是unrega中的代码
文章：用的东西     看到一篇用的东西就往这里面写
unrega：色彩空间y通道处理  灰度化
Bootstrap AutoEncoders With Contrastive Paradigm for Self-supervised Gaze Estimation: ColorJitter和Grayscale
eth-xgaze: 标准化
博客：https://www.cnblogs.com/ghgxj/p/14219097.html#grayscale

1.unrega中的色彩空间
"""

import cv2
import torchvision.transforms as transforms


def getitem_ycrcb(image_path):
    """
    Equalize the luminance channel of an image and display the result.

    Follows UnReGA: the image is converted to YCrCb, histogram
    equalization is applied to the Y (luminance) channel only, and the
    result is shown in an OpenCV window (blocks until a key is pressed).

    :param image_path: path to an image file readable by ``cv2.imread``
    :raises FileNotFoundError: if the image cannot be read
    """
    img_bgr = cv2.imread(image_path)
    if img_bgr is None:
        # cv2.imread returns None instead of raising on a bad path
        raise FileNotFoundError(f"cannot read image: {image_path}")

    # Equalize only the luminance channel so chrominance is untouched
    ycrcb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)
    ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
    # Convert straight back to BGR for display (the original code went
    # YCrCb -> RGB -> BGR, an equivalent but redundant double conversion)
    img_out = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)

    # Show the equalized image; waits for a key press
    cv2.imshow("test", img_out)
    cv2.waitKey(0)


# Data-augmentation pipeline. The commented-out transforms are options
# under evaluation (see module docstring for their sources); uncomment
# and tune parameters as needed.
transform = transforms.Compose([
    transforms.ToPILImage(),  # convert ndarray (H, W, C) to PIL.Image
    # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),  # color jitter (parameters to tune)
    # transforms.Grayscale(num_output_channels=3),  # grayscale (keeps 3 channels)
    # transforms.RandomApply([transforms.Grayscale(num_output_channels=3)], p=1),  # randomly applied grayscale (keeps 3 channels)
    transforms.ToTensor(),  # convert to a float tensor scaled to [0, 1]
    # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # normalization used by ETH-XGaze
])


def tensor_to_cv2(tensor):
    """
    Convert a CHW float tensor with values in [0, 1] to a BGR uint8 image.

    :param tensor: torch tensor of shape (C, H, W), RGB channel order
        (as produced by ``transforms.ToTensor``)
    :return: numpy array of shape (H, W, C), dtype uint8, BGR order
    """
    # detach()/cpu() makes this safe for tensors that require grad or
    # live on the GPU (the original plain .numpy() would raise there);
    # clamp() avoids uint8 wraparound if a value falls outside [0, 1]
    image = tensor.detach().cpu().mul(255).clamp(0, 255).byte().numpy()
    # Reorder dimensions (C, H, W) -> (H, W, C)
    image = image.transpose(1, 2, 0)
    # OpenCV expects BGR channel ordering
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)


def getitem(image_path):
    """
    Load an image, run the augmentation pipeline, and display the result.

    Applies the module-level ``transform`` (Grayscale / ColorJitter
    options there still need parameter tuning) and shows the augmented
    image in an OpenCV window (blocks until a key is pressed).

    :param image_path: path to an image file readable by ``cv2.imread``
    :raises FileNotFoundError: if the image cannot be read
    """
    img_bgr = cv2.imread(image_path)
    if img_bgr is None:
        # cv2.imread returns None instead of raising on a bad path
        raise FileNotFoundError(f"cannot read image: {image_path}")

    # torchvision transforms expect RGB input
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    # Apply the augmentation pipeline (returns a CHW float tensor)
    transformed_image = transform(img_rgb)
    cv2_image = tensor_to_cv2(transformed_image)
    cv2.imshow("test", cv2_image)
    cv2.waitKey(0)


if __name__ == "__main__":
    image_path = r"F:\Gaze_Dataset\eth\Image_2023\train\subject0000\1.jpg"
    getitem(image_path)
