import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical


# A simple LeNet-5-style convolutional network implemented in NumPy.
class LeNet5:
    """Minimal NumPy forward-pass implementation of a LeNet-5-like CNN.

    Architecture: conv(20 x 5x5) -> maxpool -> conv(50 x 5x5) -> maxpool
    -> fully connected (10 outputs). Only inference is implemented; there
    is no training code. Input is expected as (channels, 28, 28).
    """

    def __init__(self):
        # Conv layer 1: 20 filters of 5x5, one bias per filter.
        # Biases are 1-D so that b[i] indexes a scalar (the original
        # (1, 20) shape made b[i] raise IndexError for i >= 1).
        self.W1 = np.random.randn(20, 5, 5)
        self.b1 = np.zeros(20)
        # Conv layer 2: 50 filters of 5x5. The original (50, 20) weight
        # was 2-D and could not be unpacked as (n_filters, fh, fw).
        self.W2 = np.random.randn(50, 5, 5)
        self.b2 = np.zeros(50)
        # Fully connected layer: pool2 is (50, 4, 4) -> 800 features.
        self.W3 = np.random.randn(10, 800)
        self.b3 = np.zeros((1, 10))

    # Run the forward pass.
    def forward(self, X):
        """Compute class scores for X of shape (channels, 28, 28).

        Returns a (1, 10) array of raw (unnormalized) scores.
        """
        conv1 = self.convolution(X, self.W1, self.b1)      # (20, 24, 24)
        pool1 = self.max_pooling(conv1)                    # (20, 12, 12)
        conv2 = self.convolution(pool1, self.W2, self.b2)  # (50, 8, 8)
        pool2 = self.max_pooling(conv2)                    # (50, 4, 4)
        flattened = pool2.flatten()                        # (800,)
        fc1 = self.fully_connected(flattened, self.W3, self.b3)
        return fc1

    # Perform the convolution operation.
    def convolution(self, X, W, b):
        """Valid-mode (no padding, stride 1) convolution.

        X: (channels, H, W) input; W: (n_filters, fh, fw); b: (n_filters,).
        Each 2-D filter is broadcast over all input channels and the
        products are summed, so every output filter mixes all channels.
        Returns (n_filters, H - fh + 1, W - fw + 1).
        """
        n_filters, filter_height, filter_width = W.shape
        _, height, width = X.shape
        output_height = height - filter_height + 1
        output_width = width - filter_width + 1
        conv_out = np.zeros((n_filters, output_height, output_width))
        for i in range(n_filters):
            for j in range(output_height):
                for k in range(output_width):
                    patch = X[:, j:j + filter_height, k:k + filter_width]
                    conv_out[i, j, k] = np.sum(W[i] * patch) + b[i]
        return conv_out

    # Perform the max-pooling operation.
    def max_pooling(self, X, pool_size=2, stride=2):
        """Max-pool X of shape (n_filters, H, W) per filter plane.

        Returns (n_filters, (H - pool_size)//stride + 1,
                            (W - pool_size)//stride + 1).
        """
        n_filters, height, width = X.shape
        output_height = (height - pool_size) // stride + 1
        output_width = (width - pool_size) // stride + 1
        pool_out = np.zeros((n_filters, output_height, output_width))
        for i in range(n_filters):
            for j in range(output_height):
                for k in range(output_width):
                    pool_out[i, j, k] = np.max(
                        X[i, j * stride:j * stride + pool_size,
                             k * stride:k * stride + pool_size])
        return pool_out

    # Perform the fully connected operation.
    def fully_connected(self, X, W, b):
        """Affine layer: X @ W.T + b for X of shape (in,), W (out, in)."""
        return np.dot(X, W.T) + b


# Load the MNIST dataset (Keras downloads it on first use).
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preprocessing: scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
# NOTE(review): the MLP below expects flat (784,) inputs — if these arrays
# are ever fed to it they would need reshape(-1, 784); confirm intent.
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
# One-hot encode the integer labels into 10 classes.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)


# Define the MLP model.
def MLP():
    """Build a small fully connected classifier for flat 784-pixel images.

    Architecture: 784 -> Dense(120, ReLU) -> Dense(84, ReLU)
    -> Dense(10, softmax). Returns an uncompiled Keras Model.
    """
    image_in = Input(shape=(784,))
    hidden = Dense(120, activation='relu')(image_in)
    hidden = Dense(84, activation='relu')(hidden)
    probabilities = Dense(10, activation='softmax')(hidden)
    return Model(inputs=image_in, outputs=probabilities)


# Instantiate the MLP model.
mlp_model = MLP()

# Print the MLP model structure. summary() prints and returns None, so
# call it directly instead of printing its return value.
mlp_model.summary()

# Count the MLP model's trainable parameters.
mlp_total_params = np.sum([np.prod(v.shape.as_list()) for v in mlp_model.trainable_variables])
print("MLP Total trainable parameters: ", mlp_total_params)

# Count the parameters of the NumPy LeNet5 defined above.
# (The original referenced an undefined `lenet_model` and its
# `trainable_variables`, which raised a NameError; the NumPy class stores
# its parameters as plain arrays, so sum their sizes directly.)
lenet_model = LeNet5()
lenet_total_params = sum(
    p.size
    for p in (lenet_model.W1, lenet_model.b1,
              lenet_model.W2, lenet_model.b2,
              lenet_model.W3, lenet_model.b3)
)
print("LeNet Total trainable parameters: ", lenet_total_params)

# Compare the parameter counts.
print("LeNet vs MLP Total trainable parameters comparison: ", lenet_total_params - mlp_total_params)
