import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

# net = nn.Sequential(nn.Linear(20,256),nn.ReLU(),nn.Linear(256,10))
# X = torch.rand(2,20)
# print(net(X))

# class MLP(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.hidden = nn.Linear(20,256)
#         self.out = nn.Linear(256,10)
#
#     def forward(self,X):
#         return self.out(F.relu(self.hidden(X)))

# class MySequential(nn.Module):
#     def __init__(self,*args):
#         super().__init__()
#         for block in args:  # 存进去
#             self._modules[block]=block
#
#     def forward(self,X):
#         for block in self._modules.values():
#             X = block(X)
#         return X
#
# net = MySequential(nn.Linear(20,256),nn.ReLU(),nn.Linear(256,10))
# net(X)


# def corr2d(X,K):
#     h , w =K.shape
#     Y = torch.zeros((X.shape[0]-h+1,X.shape[1]-h+1))
#     for i in range(Y.shape[0]):
#         for j in range(Y.shape[1]):
#             Y[i,j] = (X[i:i+h,j:j+w] * K).sum()
#     return Y
#
# X = torch.tensor([[0.0,1.0,2.0],[3.0,4.0,5.0],[6.0,7.0,8.0]])
# K =torch.tensor([[0.0,1.0],[2.0,3.0]])
# Y = corr2d(X,K)
# print(Y)
#
#
# class Conv2D(nn.Module):
#     def __init__(self,kernel_size):
#         super().__init__()
#         self.weight = nn.Parameter(torch.rand(kernel_size))
#         self.bias = nn.Parameter(torch.zeros(1))
#
#     def forward(self,x):
#         return Conv2D(x,self.weight) + self.bias
#
# Learn the convolution kernel from (X, Y) pairs via gradient descent

# conV2d = nn.Conv2d(1,1,kernel_size=(2,2),bias=False)
#
# X = X.reshape((1,1,3,3))
# Y = Y.reshape((1,1,2,2))
# print(X.shape)
# print(Y.shape)
#
# for i in range(10):
#     Y_hat = conV2d(X)
#     print(Y_hat)
#     loss = (Y_hat-Y).mean()
#     conV2d.zero_grad()
#     loss.sum().backward()
#     conV2d.weight.data[:] += 0.03 * conV2d.weight.grad
#     if((i+1) % 2 == 0):
#         print(f"batch{i+1},loss:{loss.sum():.3f}")
#
# print(conV2d.weight.data)

# def corr2d_multi_in(X,K):
#     return sum(d2l.corr2d(x , k) for x , k in zip(X,K))  # zip pairs up slices along the first (channel) dim, then cross-correlates each pair
#
# def corr2d_multi_in_out(X,K): # K becomes 4-D (an extra output-channel dim)
#     return torch.stack([corr2d_multi_in(X,k) for k in K],0)

# Pooling layer
def pool2d(X, pool_size, mode='max'):
    """2D pooling with stride 1 and no padding.

    Slides a (p_h, p_w) window over every valid position of X and
    reduces each window with max or mean.

    Args:
        X: 2-D input tensor.
        pool_size: (p_h, p_w) — pooling window height and width.
        mode: 'max' for max pooling, 'avg' for average pooling.

    Returns:
        Tensor of shape (X.shape[0]-p_h+1, X.shape[1]-p_w+1).

    Raises:
        ValueError: if mode is neither 'max' nor 'avg'.
    """
    if mode not in ('max', 'avg'):
        # Fail fast: the original fell through both branches and
        # silently returned an all-zeros tensor for an unknown mode.
        raise ValueError(f"unknown pooling mode: {mode!r}")
    p_h, p_w = pool_size
    Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = X[i:i + p_h, j:j + p_w]
            # The two modes are mutually exclusive — a single
            # conditional, not two independent `if`s.
            Y[i, j] = window.max() if mode == 'max' else window.mean()
    return Y

# Demo: apply a 2x2 pooling window to a small 3x3 input,
# first with max pooling (the default), then with average pooling.
X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
for pooling_mode in ('max', 'avg'):
    y = pool2d(X, (2, 2), mode=pooling_mode)
    print(y)






