import torch
from torch import nn


# 2D cross-correlation ("convolution" without kernel flipping)
def corr2d(X, K):
    """Compute the 2D cross-correlation of matrix X with kernel K.

    Slides K over every valid position of X (stride 1, no padding) and
    records the elementwise-product sum, yielding an output of shape
    (X.rows - K.rows + 1, X.cols - K.cols + 1).
    """
    k_h, k_w = K.shape
    out_h = X.shape[0] - k_h + 1
    out_w = X.shape[1] - k_w + 1

    # Output accumulator (float, regardless of input dtype)
    Y = torch.zeros((out_h, out_w))
    for row in range(out_h):
        for col in range(out_w):
            window = X[row: row + k_h, col: col + k_w]
            Y[row, col] = (window * K).sum()
    return Y


def pool2d(X, pool_size, mode='max'):
    """2D pooling over X with a (p_h, p_w) window, stride 1, no padding.

    Generalized (backward-compatibly) to support average pooling:
    mode='max' (default, original behavior) or mode='avg'.

    Args:
        X: 2D tensor; cast to float so max/mean are well-defined.
        pool_size: (p_h, p_w) window height and width.
        mode: 'max' or 'avg'.

    Returns:
        Tensor of shape (X.rows - p_h + 1, X.cols - p_w + 1).

    Raises:
        ValueError: if mode is neither 'max' nor 'avg'.
    """
    X = X.float()
    p_h, p_w = pool_size
    Y = torch.zeros(X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)

    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = X[i: i + p_h, j: j + p_w]
            if mode == 'max':
                Y[i, j] = window.max()
            elif mode == 'avg':
                Y[i, j] = window.mean()
            else:
                raise ValueError("mode must be 'max' or 'avg', got %r" % (mode,))
    return Y

class LeNet(nn.Module):
    """LeNet-5 classifier for 1x32x32 inputs (e.g. MNIST resized to 32x32).

    Layer flow: conv(1->6,5x5) -> sigmoid -> maxpool(2,2)
             -> conv(6->16,5x5) -> sigmoid -> maxpool(2,2)
             -> flatten (16*5*5=400) -> FC 400->120 -> 120->84 -> 84->10.

    BUG FIX: the original forward() printed intermediate shapes but never
    returned the result, so callers always received None. It now returns
    the (N, 10) logits. Attribute names are unchanged so existing
    state_dicts still load.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.C1 = nn.Conv2d(1, 6, (5, 5))       # 32x32 -> 28x28
        self.activate1 = nn.Sigmoid()

        self.S2 = nn.MaxPool2d((2, 2), 2)       # 28x28 -> 14x14

        self.C3 = nn.Conv2d(6, 16, (5, 5))      # 14x14 -> 10x10
        self.activate3 = nn.Sigmoid()

        self.S4 = nn.MaxPool2d((2, 2), 2)       # 10x10 -> 5x5

        # C5 is implemented as a fully-connected layer on the flattened
        # 16*5*5 = 400 features (equivalent to a 5x5 conv at this size).
        self.C5 = nn.Linear(400, 120)
        self.activate5 = nn.Sigmoid()

        self.FC6 = nn.Linear(120, 84)
        self.activate6 = nn.Sigmoid()

        self.FC7 = nn.Linear(84, 10)

    def forward(self, img):
        """Return class logits of shape (N, 10) for input (N, 1, 32, 32)."""
        img = self.activate1(self.C1(img))
        img = self.S2(img)

        img = self.activate3(self.C3(img))
        img = self.S4(img)

        # Flatten all feature maps per sample before the FC head.
        img = img.view(img.size(0), -1)

        img = self.activate5(self.C5(img))
        img = self.activate6(self.FC6(img))
        return self.FC7(img)

class VGG16(nn.Module):
    """VGG-16-style classifier for 3x224x224 inputs.

    BUG FIXES vs. the original:
      * Every conv used a (1, 1) kernel; VGG-16 uses 3x3 kernels with
        padding=1 (spatial size preserved, so FC1's 25088 = 512*7*7
        input still matches after the five 2x2 max-pools).
      * The num_classes constructor argument was ignored (FC2 was
        hard-coded to 1000); it is now used for the final layer.
      * FC1 -> FC2 had no nonlinearity between them; a ReLU is applied.
      * Debug prints removed from forward().

    NOTE(review): canonical VGG-16 also has a second 4096->4096 FC layer
    and dropout; the original two-layer head is kept to preserve the
    module's attribute layout.
    """

    def __init__(self, num_classes=1000):
        super(VGG16, self).__init__()

        # Block 1: 3 -> 64, 224x224 -> 112x112
        self.conv1 = nn.Conv2d(3, 64, (3, 3), padding=1)
        self.ReLU1 = nn.ReLU()
        self.conv2 = nn.Conv2d(64, 64, (3, 3), padding=1)
        self.ReLU2 = nn.ReLU()
        self.maxpool3 = nn.MaxPool2d((2, 2), 2)

        # Block 2: 64 -> 128, 112x112 -> 56x56
        self.conv4 = nn.Conv2d(64, 128, (3, 3), padding=1)
        self.ReLU4 = nn.ReLU()
        self.conv5 = nn.Conv2d(128, 128, (3, 3), padding=1)
        self.ReLU5 = nn.ReLU()
        self.maxpool6 = nn.MaxPool2d((2, 2), 2)

        # Block 3: 128 -> 256, 56x56 -> 28x28
        self.conv7 = nn.Conv2d(128, 256, (3, 3), padding=1)
        self.ReLU7 = nn.ReLU()
        self.conv8 = nn.Conv2d(256, 256, (3, 3), padding=1)
        self.ReLU8 = nn.ReLU()
        self.conv9 = nn.Conv2d(256, 256, (3, 3), padding=1)
        self.ReLU9 = nn.ReLU()
        self.maxpool10 = nn.MaxPool2d((2, 2), 2)

        # Block 4: 256 -> 512, 28x28 -> 14x14
        self.conv11 = nn.Conv2d(256, 512, (3, 3), padding=1)
        self.ReLU11 = nn.ReLU()
        self.conv12 = nn.Conv2d(512, 512, (3, 3), padding=1)
        self.ReLU12 = nn.ReLU()
        self.conv13 = nn.Conv2d(512, 512, (3, 3), padding=1)
        self.ReLU13 = nn.ReLU()
        self.maxpool14 = nn.MaxPool2d((2, 2), 2)

        # Block 5: 512 -> 512, 14x14 -> 7x7
        self.conv15 = nn.Conv2d(512, 512, (3, 3), padding=1)
        self.ReLU15 = nn.ReLU()
        self.conv16 = nn.Conv2d(512, 512, (3, 3), padding=1)
        self.ReLU16 = nn.ReLU()
        self.conv17 = nn.Conv2d(512, 512, (3, 3), padding=1)
        self.ReLU17 = nn.ReLU()
        self.maxpool18 = nn.MaxPool2d((2, 2), 2)

        # Classifier head: 512*7*7 = 25088 flattened features.
        self.FC1 = nn.Linear(25088, 4096)
        self.FC2 = nn.Linear(4096, num_classes)

    def forward(self, x):
        """Return class logits of shape (N, num_classes) for (N, 3, 224, 224)."""
        x = self.ReLU1(self.conv1(x))
        x = self.ReLU2(self.conv2(x))
        x = self.maxpool3(x)

        x = self.ReLU4(self.conv4(x))
        x = self.ReLU5(self.conv5(x))
        x = self.maxpool6(x)

        x = self.ReLU7(self.conv7(x))
        x = self.ReLU8(self.conv8(x))
        x = self.ReLU9(self.conv9(x))
        x = self.maxpool10(x)

        x = self.ReLU11(self.conv11(x))
        x = self.ReLU12(self.conv12(x))
        x = self.ReLU13(self.conv13(x))
        x = self.maxpool14(x)

        x = self.ReLU15(self.conv15(x))
        x = self.ReLU16(self.conv16(x))
        x = self.ReLU17(self.conv17(x))
        x = self.maxpool18(x)

        x = x.view(x.size(0), -1)
        x = torch.relu(self.FC1(x))  # fix: activation between FC layers
        x = self.FC2(x)
        return x


# Smoke test: input matrix X and kernel matrix K for corr2d.
# Expected output: [[19., 25.], [37., 43.]]
X = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])

K = torch.tensor([[0, 1], [2, 3]])
print(corr2d(X, K))


# Train: module-level setup for MNIST training of the project's LeNet5.

import torch.utils.data as Data
from torch import optim
import os
import numpy as np
from model import LeNet5
import torch
import torch.nn as nn
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

# Per-batch loss history, filled by train() and plotted at the end.
loss_list = []
batch_list = []
epochs = 3
num = 0  # NOTE(review): unused; kept for compatibility with other code.

# BUG FIX: path is now a raw string — the original 'F:\Pycharm...' relied on
# '\P'/'\p'/'\m' not being escape sequences (a DeprecationWarning and fragile).
data_train = datasets.MNIST(r'F:\PycharmProjects\pythonProject\mnist-data',
                            download=False,
                            transform=transforms.Compose([
                                transforms.Resize((32, 32)),  # LeNet expects 32x32
                                transforms.ToTensor()]))
data_train_loader = torch.utils.data.DataLoader(data_train,
                                                batch_size=512,
                                                shuffle=True,
                                                num_workers=8)

net = LeNet5()
criterion = nn.CrossEntropyLoss()
# NOTE(review): loss_func is never used below (criterion is); confirm before removing.
loss_func = nn.BCELoss(reduction='mean')
optimizer = optim.Adam(net.parameters(), lr=0.01)

def train():
    """Train `net` on MNIST for `epochs` passes, then plot the loss curve.

    Uses the module-level net / criterion / optimizer / data_train_loader.
    Appends every batch loss to loss_list and logs every 10th batch.
    """
    for epoch in range(epochs):
        for i, (images, labels) in enumerate(data_train_loader):
            optimizer.zero_grad()
            output = net(images)
            output = torch.squeeze(output)
            loss = criterion(output, labels)
            loss_list.append(loss.detach().cpu().item())
            batch_list.append(i + 1)

            if i % 10 == 0:
                # BUG FIX: the original printed the constant `epochs` instead
                # of the current epoch, so the log always showed "Epoch 3".
                print('Train - Epoch %d, Batch: %d, Loss: %f'
                      % (epoch + 1, i, loss.detach().cpu().item()))
            # save_model(net, optimizer, epoch + 1, 'lenet', 'mnist', dummy_input)
            loss.backward()
            optimizer.step()

    # Plot the per-batch loss history accumulated across all epochs.
    x = np.linspace(0, len(loss_list), len(loss_list))
    plt.plot(x, loss_list)
    plt.show()

def main():
    """Entry point: run training, then persist the trained network."""
    train()
    # NOTE: this pickles the whole module object, not just a state_dict.
    torch.save(net, "./LeNet5.pkl")


if __name__ == '__main__':
    main()
