# -*- coding: utf-8 -*-
"""
# @file name    : my_transforms.py
# @author       : QuZhang
# @date         : 2020-12-10 17:08
# @brief        : Define a custom transforms method (salt-and-pepper noise)
"""

import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Work around the "duplicate OpenMP runtime" abort commonly hit on Windows/MKL setups.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import sys
# Project root = two directories above this file; appended so `tools.*` imports resolve.
pytorch_code_DIR = os.path.abspath(os.path.dirname(__file__) + os.path.sep + ".." + os.path.sep + "..")
# print(pytorch_code_DIR)  # D:\CV_AMBITION\pytorch\deepshare\PyTorch\code
sys.path.append(pytorch_code_DIR)

# Sanity-check that the helper modules exist before the imports below try to load them.
path_lenet = os.path.abspath(os.path.join(BASE_DIR, "..", "..", 'model', "lenet.py"))
path_tools = os.path.abspath(os.path.join(BASE_DIR, '..', '..', "tools", "common_tools.py"))
assert os.path.exists(path_lenet), "{} 不存在，请将lenet.py文件放到{}".format(path_lenet, os.path.dirname(path_lenet))
assert os.path.exists(path_tools), "{} 不存在，请将common_tools.py文件放到{}".format(path_tools, os.path.dirname(path_tools))

from tools.common_tools import set_seed
import random
import numpy as np
from PIL import Image
from torchvision.transforms import transforms
from tools.my_dataset import RMBDataset
from torch.utils.data import DataLoader
from tools.common_tools import transform_invert
from matplotlib import pyplot as plt


set_seed(1)  # fix the RNG seed so augmentation results are reproducible

class AddPeperNoise(object):
    """Add salt-and-pepper noise to a PIL image.

    Custom transforms take exactly one argument and return one value, so they
    can be chained inside ``transforms.Compose``.

    Args:
        snr (float): Signal-to-noise ratio in [0, 1] — the fraction of pixels
            left untouched; the remaining ``1 - snr`` are split evenly between
            salt (white) and pepper (black) noise.
        p (float): Probability of applying the transform at all.
    """

    def __init__(self, snr, p=0.9):
        # Explicit validation instead of `assert`: asserts are stripped under
        # `python -O`. Ints are also accepted (backward-compatible widening).
        if not isinstance(snr, (int, float)) or not isinstance(p, (int, float)):
            raise TypeError("snr and p must be numbers (float)")
        self.snr = float(snr)
        self.p = float(p)

    def __call__(self, img):
        """Apply the noise with probability ``self.p``.

        Args:
            img (PIL Image): input image (assumed H x W x C after
                ``np.array`` — TODO confirm callers never pass grayscale).
        Returns:
            PIL Image: a noised RGB copy, or the untouched input when the
            random draw skips the transform.
        """
        if random.uniform(0, 1) >= self.p:
            return img  # skip — no noise this time

        img_ = np.array(img).copy()  # PIL -> ndarray so pixels can be edited
        h, w, c = img_.shape
        signal_pct = self.snr       # fraction of pixels kept as-is
        noise_pct = 1 - signal_pct  # fraction of pixels turned into noise
        # Per-pixel mask over one channel: 0 = keep, 1 = salt, 2 = pepper.
        # np.random.choice draws each of the h*w*1 entries from (0, 1, 2)
        # with the given probabilities.
        mask = np.random.choice((0, 1, 2), size=(h, w, 1),
                                p=[signal_pct, noise_pct / 2., noise_pct / 2.])
        # Repeat along axis=2 so the same decision applies to every channel.
        mask = np.repeat(mask, c, axis=2)
        img_[mask == 1] = 255  # salt noise (white)
        img_[mask == 2] = 0    # pepper noise (black)
        return Image.fromarray(img_.astype("uint8")).convert("RGB")

if __name__ == "__main__":
    # ----- hyper-parameters -----
    MAX_EPOCH = 10
    BATCH_SIZE = 1
    LR = 0.01
    log_interval = 10
    val_interval = 1
    rmb_label = {"1": 0, "100": 1}

    # =========== 1/5 data ===================
    # Dataset root produced by the split script.
    split_dir = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'data', 'rmb_split'))
    # FIX: the original message contained "\1", an octal escape that renders as
    # the control char \x01; "\\1" shows the intended literal path.
    assert os.path.exists(split_dir), "数据 {} 不存在,回到lesson-06\\1_split_dataset.py生成数据".format(split_dir)
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "test")

    # Preprocessing: ImageNet channel statistics for Normalize.
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]
    train_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        AddPeperNoise(0.9, p=0.5),  # custom salt-and-pepper noise augmentation
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    valid_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # Build the Dataset instances.
    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
    valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

    # Build the DataLoaders.
    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
    # BUG FIX: the original passed `valid_dir` (a path string) instead of the
    # `valid_data` Dataset, which would fail as soon as the loader is iterated.
    valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

    # ====== 5/5 "training" loop — actually just visualizes augmented samples ======
    for epoch in range(MAX_EPOCH):
        for i, data in enumerate(train_loader):
            inputs, labels = data
            for j in range(BATCH_SIZE):
                img_tensor = inputs[j, ...]
                # Undo ToTensor/Normalize so the augmented image is viewable.
                img = transform_invert(img_tensor, train_transform)
                plt.imshow(img)
                plt.show()
                plt.pause(0.5)
                plt.close()
