# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torchvision import transforms

# input: a fake 3-channel 5x5 "image" where channel values are 1 (R),
# 2 (G) and 3 (B), then a leading batch axis is added.
channels = [torch.full((5, 5), float(v)) for v in (1, 2, 3)]
input = torch.stack(channels, dim=0).unsqueeze(0)  # C*H*W to B*C*H*W
# print(input)

# create convolution layer: 3 input channels -> 1 output channel,
# 3x3 kernel, stride 1, no padding, no bias.  With an all-ones kernel
# every output pixel is the plain sum of the 27 covered input values:
# (1 + 2 + 3) * 9 = 54.
conv_layer = nn.Conv2d(3, 1, 3, padding=0, stride=1, bias=False)
# Mutate the weights in-place under no_grad() instead of assigning to
# .weight.data — direct .data assignment bypasses autograd bookkeeping
# and is discouraged in modern PyTorch.
with torch.no_grad():
    conv_layer.weight.fill_(1.)
# print("bias: ..{}..".format(conv_layer.bias))

# calculation: 5x5 input, 3x3 kernel, padding 0 -> 3x3 output
output = conv_layer(input)
print("=========================== 1) padding = 0 ===========================")
print("卷积核为1：")
print("卷积前尺寸:{}\n卷积后尺寸:{}".format(input.shape, output.shape))
print("所有像素值为:\n{}".format(output))

# Doubling the kernel doubles every output pixel (convolution is linear).
with torch.no_grad():
    conv_layer.weight.fill_(2.)
output = conv_layer(input)
print("卷积核为2：")
print("卷积前尺寸:{}\n卷积后尺寸:{}".format(input.shape, output.shape))
print("所有像素值为:\n{}".format(output))

# Second experiment: same kernels but padding=1, so the spatial size is
# preserved (5x5 in -> 5x5 out).  Border pixels see zero-padding and are
# therefore smaller than the interior value of 54.
conv_layer = nn.Conv2d(3, 1, 3, padding=1, stride=1, bias=False)
# In-place fill under no_grad() instead of the deprecated .data assignment.
with torch.no_grad():
    conv_layer.weight.fill_(1.)

output = conv_layer(input)
# Header fixed: this is section 2), not a second "1)" (copy-paste bug).
print("=========================== 2) padding = 1 ===========================")
print("卷积核为1：")
print("卷积前尺寸:{}\n卷积后尺寸:{}".format(input.shape, output.shape))
print("所有像素值为:\n{}".format(output))

with torch.no_grad():
    conv_layer.weight.fill_(2.)
output = conv_layer(input)
print("卷积核为2：")
print("卷积前尺寸:{}\n卷积后尺寸:{}".format(input.shape, output.shape))
print("所有像素值为:\n{}".format(output))
