import os
import random

import cv2
import imgaug.augmenters as iaa
import numpy as np
import torch
from PIL import Image, ImageDraw
from torch import nn, Tensor
from torchvision import transforms
from torchvision.transforms import functional as F





class Compose(object):
    """Chain a sequence of callables, feeding each one the previous output."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image):
        result = image
        for transform in self.transforms:
            result = transform(result)
        return result

class ToTensor(object):
    """Convert a batch of HWC images (NHWC array) to a contiguous NCHW float tensor."""

    def __call__(self, images):
        batch = torch.tensor(images, dtype=torch.float)
        # NHWC -> NCHW, made contiguous for downstream ops
        return batch.permute(0, 3, 1, 2).contiguous()
    

class Normalize(object):
    """Per-image channel normalization over a batch (tensor or list of CHW tensors).

    Args:
        mean: per-channel means passed to torchvision's F.normalize.
        std: per-channel standard deviations.
        inplace: if True, normalize the input storage directly.
    """

    def __init__(self, mean, std, inplace=False):
        self.std = std
        self.mean = mean
        self.inplace = inplace

    def __call__(self, images):
        # BUG FIX: previously, even with inplace=False, `images[i] = ...` on a
        # batched tensor copied the normalized values back into the caller's
        # tensor, silently mutating the input. Clone first to honor the flag.
        if not self.inplace and isinstance(images, torch.Tensor):
            images = images.clone()
        for i in range(len(images)):
            images[i] = F.normalize(images[i], self.mean, self.std, self.inplace)
        return images


# Random horizontal flip
class RandomHorizontalFlip(object):
    """Flip a tensor image along its last (width) dimension with probability `prob`."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image):
        flip_now = random.random() < self.prob
        return image.flip(-1) if flip_now else image

# Resize the image and paste it onto a preallocated all-black canvas; the comment
# in the original suggests random placement was intended, but only top-left is implemented.
class Resize_Pad(object):
    """Resize a CHW image so its longest side equals `img_size`, then pad it
    onto a black square canvas of shape (1, 3, img_size, img_size).

    NOTE(review): `randing` is stored but never read — the image is always
    placed at the top-left corner (dx=dy=0).
    """

    def __init__(self, img_size, randing=True):
        self.img_size = img_size
        self.randing = randing

    def __call__(self, image):
        # Resize first (image anchored at top-left), then pad to a square.
        resized = self.Resize_LocLeft(image)
        return self.Pad(resized)

    def Pad(self, image, dx=0, dy=0):
        """Paste `image` at offset (dx, dy) on a zero-filled square canvas."""
        channels, height, width = image.shape
        canvas = image.new_full([1, 3, self.img_size, self.img_size], 0)
        canvas[:, :channels, dy:height + dy, dx:width + dx].copy_(image)
        return canvas

    def Resize_LocLeft(self, image):
        """Scale the image so that max(H, W) == img_size, preserving aspect ratio."""
        longest_side = float(torch.max(torch.tensor(image.shape[-2:])))
        factor = float(self.img_size) / longest_side

        # interpolate needs a 4D tensor, hence image[None]; drop the batch
        # dimension again after resizing.
        resized = torch.nn.functional.interpolate(
            image[None], scale_factor=factor, mode='bilinear',
            recompute_scale_factor=True, align_corners=False)[0]
        return resized

# Convert the image to HSV and jitter its hue, saturation, and value.
# Input must be a PIL image (or HWC ndarray), not a tensor.
class distort_image(object):
    """Randomly jitter hue, saturation, and value of an RGB image.

    Input: PIL image or HWC uint8 ndarray (not a tensor).
    Returns: HWC float ndarray scaled back to [0, 255].
    """

    def __init__(self, hue=.1, sat=1.5, val=1.5):
        self.hue = hue   # max fractional hue shift (of the 360-degree circle)
        self.sat = sat   # max saturation gain (or its reciprocal)
        self.val = val   # max value/brightness gain (or its reciprocal)

    def __call__(self, image):
        hue = self.rand(-self.hue, self.hue)
        sat = self.rand(1, self.sat) if self.rand() < .5 else 1 / self.rand(1, self.sat)
        val = self.rand(1, self.val) if self.rand() < .5 else 1 / self.rand(1, self.val)

        # For float32 input, OpenCV's HSV uses H in [0, 360], S and V in [0, 1].
        x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue * 360
        # BUG FIX: hue wraps at 360 for float HSV, not at 1. The original code
        # compared against 1, so positive shifts were never wrapped and ended
        # up clamped to 360 (red) instead of cycling around the hue circle.
        x[..., 0][x[..., 0] > 360] -= 360
        x[..., 0][x[..., 0] < 0] += 360
        x[..., 1] *= sat
        x[..., 2] *= val
        # Clamp to valid HSV ranges before converting back.
        x[x[:, :, 0] > 360, 0] = 360
        x[:, :, 1:][x[:, :, 1:] > 1] = 1
        x[x < 0] = 0
        image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255  # back to RGB in [0, 255]
        return image

    def rand(self, a=0, b=1):
        """Uniform random float in [a, b)."""
        return np.random.rand() * (b - a) + a

class image_transforms():
    """imgaug-based augmentation pipeline applied to batches of numpy images."""

    def __init__(self):
        augmenters = [
            # Horizontal flip with 50% probability.
            iaa.Fliplr(0.5),
            # Gaussian blur (sigma 0-1.0), applied to 50% of images.
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 1.0))),
            # Linear contrast scaling between 75% and 150%.
            iaa.LinearContrast((0.75, 1.5)),
            # Additive Gaussian noise with strength 0 to 0.05*255 (= 12.75).
            iaa.AdditiveGaussianNoise(scale=(0.0, 0.05 * 255)),
            # Pixel value scaling by 0.8-1.2; per-channel for 20% of images.
            iaa.Multiply((0.8, 1.2), per_channel=0.2),
            # Affine transform: scale, translate, rotate, and shear.
            iaa.Affine(
                scale=(0.5, 1.5),
                translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                rotate=(-25, 25),
                shear=(-8, 8),
            ),
        ]
        self.seq = iaa.Sequential(augmenters)

    def __call__(self, images):
        return self.seq(images=images)




class transforms_detect_train(nn.Module):
    """Training transform pipeline: imgaug augmentation -> NCHW tensor -> normalize.

    Args:
        img_size: target image size (stored; used by optional resize steps).
        mean/std: per-channel normalization constants (BGR-ordered defaults).
    """

    def __init__(self, img_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)):
        # BUG FIX: nn.Module subclasses must call super().__init__() before
        # assigning attributes, otherwise the module's internal registries
        # (_parameters/_modules) are never created.
        super().__init__()
        self.img_size = img_size
        self.mean = mean
        self.std = std
        self.augment = Compose([
            image_transforms(),
            ToTensor(),
            Normalize(mean=mean, std=std),
        ])

    def __call__(self, img):
        return self.augment(img)

class TransformsDetectTrain(Compose):
    """Compose-based training pipeline: imgaug augmentation -> tensor -> normalize.

    BUG FIX: the original built a `transforms_list`, filtered it, then threw
    both away and rebuilt a different list — all dead code. It also referenced
    `transforms` (torchvision.transforms), which was only imported inside the
    `__main__` guard, so instantiating this class from an import raised
    NameError. The module-level import now provides it.

    Args:
        img_size: accepted for interface compatibility; not used here.
        mean/std: per-channel normalization constants.
    """

    def __init__(self, img_size, mean, std):
        super().__init__([
            # imgaug pipeline: consumes and returns numpy arrays.
            image_transforms(),
            # Converts an HWC ndarray (or PIL image) to a CHW float tensor.
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])


class transforms_detect_val(nn.Module):
    """Validation transform pipeline: NCHW tensor conversion -> normalize (no augmentation).

    Args:
        img_size: target image size (stored; used by optional resize steps).
        mean/std: per-channel normalization constants (BGR-ordered defaults).
    """

    def __init__(self, img_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)):
        # BUG FIX: nn.Module subclasses must call super().__init__() before
        # assigning attributes, otherwise the module's internal registries
        # (_parameters/_modules) are never created.
        super().__init__()
        self.img_size = img_size
        self.mean = mean
        self.std = std
        self.augment = Compose([
            ToTensor(),
            Normalize(mean=mean, std=std),
        ])

    def __call__(self, img):
        return self.augment(img)




if __name__ == '__main__':
    import torchvision.transforms as transforms

    # Smoke test: run the validation pipeline over every training image and
    # display the result.
    img_dir = './trainset/trainset/'  # renamed from `dir`, which shadowed the builtin
    transform = transforms_detect_val(224)
    for name in os.listdir(img_dir):
        image = Image.open(os.path.join(img_dir, name))
        image = transform(image)
        # The pipeline returns a batched tensor; show the first (only) image.
        transforms.ToPILImage()(image[0]).show()




