# -*- coding: utf-8 -*-
"""
# @file name    : nn_layers_others.py
# @author       : QuZhang
# @date         : 2020-12-14 14:13
# @brief        : Other network layers (pooling, unpooling, linear/fully-connected)
"""
import os
import sys
from tools.common_tools import set_seed
from PIL import Image
from torchvision.transforms import transforms
import torch.nn as nn
from tools.common_tools import transform_invert
from matplotlib import pylab as plt
import torch


# Allow duplicate OpenMP runtimes — common workaround for the libiomp5
# "already initialized" crash when torch and matplotlib are loaded together.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Project root: two directories above this file, appended so `tools` is importable.
# NOTE(review): the `from tools.common_tools import ...` lines above execute
# before this append, so they already require the root on sys.path — confirm
# how this script is actually launched.
code_DIR = os.path.abspath(os.path.dirname(__file__)+os.path.sep+".."+os.path.sep+"..")
sys.path.append(code_DIR)
# Directory containing this script; used below to locate lena.png.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Fail fast with a helpful (Chinese) message if the shared helper module is missing.
path_tools = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'tools', 'common_tools.py'))
assert os.path.exists(path_tools), "{}不存在，请将common_tools.py文件放到 {}".format(path_tools, os.path.dirname(path_tools))

set_seed(1)  # fix the random seed so nn.Linear's init is reproducible

if __name__ == '__main__':
    # ------------- load image -------------
    path_img = os.path.join(BASE_DIR, 'lena.png')
    img = Image.open(path_img).convert("RGB")  # pixel values in 0~255

    img_transform = transforms.Compose([transforms.ToTensor()])
    img_tensor = img_transform(img).unsqueeze(0)  # add batch dim: CHW -> NCHW

    # Demo toggles — only the linear-layer demo is enabled by default.
    run_maxpool = False
    run_avgpool = False
    run_unpool = False
    run_linear = True

    # ================= pooling ==============
    # Pooling discards redundant information and reduces computation.

    # Max pooling: MaxPool2d
    if run_maxpool:
        maxpool_layer = nn.MaxPool2d((2, 2), stride=(2, 2))
        img_pool = maxpool_layer(img_tensor)

    # Average pooling: AvgPool2d
    if run_avgpool:
        avgpool_layer = nn.AvgPool2d((2, 2), stride=(2, 2))
        img_pool = avgpool_layer(img_tensor)

    # Unpooling (upsampling): MaxUnpool2d
    if run_unpool:
        # Downsample with max pooling, recording the argmax indices.
        img_tensor = torch.randint(high=5, size=(1, 1, 4, 4), dtype=torch.float)
        maxpool_layer = nn.MaxPool2d((2, 2), stride=(2, 2), return_indices=True)
        img_pool, indices = maxpool_layer(img_tensor)

        # Upsample: each value is written back to the position it was pooled from.
        img_reconstruct = torch.randn_like(img_pool, dtype=torch.float)
        maxunpool_layer = nn.MaxUnpool2d((2, 2), stride=(2, 2))
        img_unpool = maxunpool_layer(img_reconstruct, indices)

        print("raw_img:\n{}\nimg_pool:\n{}".format(img_tensor, img_pool))
        print("img_reconstruct:\n{}\nimg_unpool:\n{}".format(img_reconstruct, img_unpool))

    # Fully-connected (linear) layer: outputs = inputs @ weight.T + bias
    if run_linear:
        # Two input samples, three features each.
        inputs = torch.tensor([[1., 2, 3],
                               [1, 1, 1]])
        linear_layer = nn.Linear(3, 4)
        # Overwrite the random init with fixed weights:
        # row r is [r, r, r] for r = 1..4, so results are easy to verify by hand.
        linear_layer.weight.data = torch.arange(1., 5.).unsqueeze(1).repeat(1, 3)
        linear_layer.bias.data.fill_(0.5)
        outputs = linear_layer(inputs)
        print("inputs.shape:\n{}\ninputs:\n{}".format(inputs.shape, inputs))
        print("linear_layer.weight.data.shape:\n{}\nlinear_layer.weight.data:\n{}".format(linear_layer.weight.data.shape, linear_layer.weight.data))
        print("outputs.shape:\n{}\noutputs:\n{}".format(outputs.shape, outputs))

    # ========== visualization ===============
    # print("shape before pooling: {}\nshape after pooling: {}".format(img_tensor.shape, img_pool.shape))
    # for i in range(img_pool.shape[0]):
    #     img_pool = transform_invert(img_pool[i], img_transform)
    #     img_raw = transform_invert(img_tensor.squeeze(), img_transform)
    #     plt.subplot(121).imshow(img_raw)
    #     plt.subplot(122).imshow(img_pool)
    #     plt.show()
