import torch.nn as nn

input_size = 28  # image width and height; not referenced in this chunk — TODO confirm callers use it
out_size = 15  # number of predicted outputs; mirrors the hard-coded 15 in StockCnn.out2


class StockCnn(nn.Module):
    """CNN for stock prediction: two conv/tanh/max-pool stages followed by
    two fully-connected layers.

    Input:  ``(N, 1, 28, 28)`` single-channel images.
    Output: tuple ``(prediction, features)`` where ``prediction`` has shape
            ``(N, 15)`` and ``features`` is the flattened conv output of
            shape ``(N, 32 * 8 * 8)``.

    Spatial-size bookkeeping (Conv2d/MaxPool2d both use floor division):
        out = floor((in - kernel + 2 * padding) / stride) + 1
    """

    def __init__(self):
        super(StockCnn, self).__init__()
        # Stage 1: 1 input channel -> 16 feature maps.
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=2
            ),  # (28 - 3 + 2*2)/1 + 1 = 30  ->  (16, 30, 30)
            nn.Tanh(),
            # MaxPool2d stride defaults to kernel_size, i.e. stride=2 here:
            # floor((30 - 2)/2) + 1 = 15  ->  (16, 15, 15)
            nn.MaxPool2d(kernel_size=2),
        )
        # Stage 2: the 16 output channels of stage 1 are the input channels here.
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=2
            ),  # (15 - 3 + 2*2)/1 + 1 = 17  ->  (32, 17, 17)
            nn.Tanh(),
            # floor((17 - 2)/2) + 1 = 8  ->  (32, 8, 8)
            nn.MaxPool2d(kernel_size=2),
        )
        # Activation used between (and after) the fully-connected layers.
        self.relu = nn.ReLU()
        # Fully-connected head: flattened conv features -> 84 -> 15 outputs.
        self.out1 = nn.Linear(in_features=32 * 8 * 8, out_features=84)
        self.out2 = nn.Linear(in_features=84, out_features=15)

    def forward(self, x):
        """Run the network; return (prediction, flattened conv features)."""
        x = self.conv1(x)
        x = self.conv2(x)
        # Flatten (N, 32, 8, 8) -> (N, 2048) for the linear layers.
        x = x.view(x.size(0), -1)
        output = self.out1(x)
        output = self.relu(output)
        output = self.out2(output)
        # NOTE(review): this final ReLU clamps every prediction to be
        # non-negative — confirm the targets are non-negative, otherwise it
        # silently caps the model's output range.
        output = self.relu(output)
        return output, x
