# -*- coding:utf-8 -*-
"""
# @file name    : transforms_method_1.py
# @author       : QuZhang
# @date         : 2020-12-05 23:10
# @brief        : transforms methods (part 1)
"""
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from torchvision import transforms
from tools.my_dataset import RMBDataset
from torch.utils.data import DataLoader
import torch
from PIL import Image
import matplotlib.pyplot as plt


BATCH_SIZE = 4

def transform_invert(img_, transform_train):
    """
    Invert the preprocessing pipeline applied to a tensor, recovering a PIL image.

    :param img_: torch.Tensor of shape (C, H, W), as produced by the pipeline
    :param transform_train: torchvision.transforms.Compose applied to the image
    :return: PIL.Image (RGB for 3 channels, grayscale for 1 channel)
    :raises Exception: if the channel count is neither 1 nor 3
    """
    # Work on a copy: the caller's tensor is typically a view into the batch
    # tensor, and the in-place mul_/add_ below would otherwise corrupt it.
    img_ = img_.clone()

    if "Normalize" in str(transform_train):
        # Pick the Normalize op out of the pipeline to undo its normalization.
        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize), transform_train.transforms))
        mean = torch.tensor(norm_transform[0].mean, dtype=img_.dtype, device=img_.device)
        std = torch.tensor(norm_transform[0].std, dtype=img_.dtype, device=img_.device)
        # Broadcast (C,) stats to (C, 1, 1) and invert: x = x * std + mean
        img_.mul_(std[:, None, None]).add_(mean[:, None, None])

    img_ = img_.transpose(0, 1).transpose(1, 2)  # (C, H, W) --> (H, W, C)
    # Always leave tensor-land here; the original only converted inside the
    # "ToTensor" branch, so the astype() calls below crashed on the other path
    # (torch tensors have no .astype).
    img_ = img_.detach().cpu().numpy()
    if "ToTensor" in str(transform_train) or img_.max() < 1:
        img_ = img_ * 255  # scale 0~1 floats back to the 0~255 pixel range

    if img_.shape[2] == 3:
        # three channels: RGB image
        img_ = Image.fromarray(img_.astype('uint8')).convert("RGB")
    elif img_.shape[2] == 1:
        # single channel: drop the size-1 axis and build a grayscale image
        img_ = Image.fromarray(img_.astype('uint8').squeeze())
    else:
        raise Exception("Invalid img shape, expected 1 or 3 in axis 2, but got {}!".format(img_.shape[2]))

    return img_

if __name__ == "__main__":

    split_dir = os.path.abspath(os.path.join(BASE_DIR, "..", '..', 'data', 'rmb_split'))
    if not os.path.exists(split_dir):
        raise Exception(r"数据{} 不存在，回到lesson-06-1_split_dataset.py生成数据".format(split_dir))
    train_dir = os.path.join(split_dir, 'train')

    # ImageNet channel statistics; Normalize here is undone by transform_invert.
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]
    train_transform = transforms.Compose([
        transforms.Resize((224, 224)),

        # 1. Center crop
        # transforms.CenterCrop(512),

        # 2. Random crop
        # transforms.RandomCrop(224, padding=16),  # pad 16 px on all four sides (fill defaults to 0)
        # transforms.RandomCrop(224, padding=(16, 64)),  # pad 16 px left/right, 64 px top/bottom
        # transforms.RandomCrop(224, padding=16, fill=(255, 0, 0)),  # custom fill color (R, G, B)
        # transforms.RandomCrop(512, pad_if_needed=True),  # crop larger than the input requires padding
        # transforms.RandomCrop(224, padding=16, padding_mode='edge'),  # pad with edge pixel values
        # transforms.RandomCrop(224, padding=64, padding_mode="reflect"),

        # 3. Randomly rescale the image, then crop
        # transforms.RandomResizedCrop(size=224, scale=(0.5, 0.5)),

        # 4. Crop 5 patches (four corners + center); yields a 5-element tuple
        # transforms.FiveCrop(112),
        # Convert each patch to a tensor and stack them into one tensor.
        # transforms.Lambda(lambda crops: torch.stack([ (transforms.ToTensor()(corp)) for corp in crops])),

        # 5. Crop 10 patches: FiveCrop plus flipped copies
        # transforms.TenCrop(112, vertical_flip=False),
        # transforms.Lambda(lambda crops: torch.stack([ (transforms.ToTensor()(crop)) for crop in crops])),  # custom lambda

        # 1. Flip horizontally / vertically with the given probability
        # transforms.RandomHorizontalFlip(p=0.5),
        # transforms.RandomVerticalFlip(p=0.5),

        # 2. Rotate by a random angle within the given range
        # transforms.RandomRotation(90),
        # transforms.RandomRotation((-90, 90)),
        transforms.RandomRotation((-45, 90)),

        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # Distinct name for the dataset: the original reused `train_dir`, shadowing
    # the path string it was built from.
    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)

    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

    for i, data in enumerate(train_loader):
        if i >= 1:
            break  # only the first batch is visualized; no point iterating the rest

        # A loaded batch is a 4-D tensor: (batch, channel, height, width).
        inputs, labels = data  # inputs: (B, C, H, W)
        print("label: {}".format(labels))

        # Set to True when the pipeline uses FiveCrop/TenCrop, which insert an
        # extra ncrops dimension into the batch.
        crop_flag = False
        if not crop_flag:
            for j in range(BATCH_SIZE):
                # Read images one by one along the batch dimension.
                img_tensor = inputs[j, ...]  # (C, H, W)
                img = transform_invert(img_tensor, train_transform)
                plt.imshow(img)
                plt.show()
                plt.pause(0.5)
                plt.close()
        else:
            # (batch size, crops per image, channels, height, width)
            bs, ncrops, c, h, w = inputs.shape

            # Iterate image by image ...
            for j in range(BATCH_SIZE):
                # ... and crop by crop within each image.
                for n in range(ncrops):
                    img_tensor = inputs[j, n, ...]
                    img = transform_invert(img_tensor, train_transform)
                    plt.imshow(img)
                    # plt.savefig("corps"+str(n))
                    plt.show()
                    plt.pause(0.5)
                    plt.close()
