import tensorflow as tf
from tensorflow.keras import datasets  # classic built-in datasets (MNIST etc.)
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models
from tensorflow.keras import optimizers
# NOTE(review): disabled exploratory snippet — it enumerates every dataset
# bundled with tensorflow.keras.datasets. Kept as a bare string literal so it
# never executes; safe to delete.
'''
# 查看tensorflow.keras.datasets模块中的所有可用数据集
print("=" * 60)
print("tensorflow.keras.datasets 中的所有自带数据集")
print("=" * 60)

# 获取模块中的所有公共属性（不以下划线开头的）
dataset_attributes = [name for name in dir(datasets) if not name.startswith('_')]

# 打印所有数据集函数
for i, dataset_name in enumerate(sorted(dataset_attributes), 1):
    print(f"{i}. {dataset_name}")

print(f"\n总共 {len(dataset_attributes)} 个内置数据集")
'''

# Load the MNIST dataset as numpy arrays (60k train / 10k test, 28x28 uint8).
(x, y), (x_test, y_test) = datasets.mnist.load_data()
print('x:', x.shape, 'y:', y.shape, 'x_test:', x_test.shape, 'y_test:', y_test.shape)
print(x)
# Wrap the arrays in tf.data.Dataset objects for the input pipeline.
train = tf.data.Dataset.from_tensor_slices((x, y))
print(train)
test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# # Display the first 10 training images
# plt.figure(figsize=(10, 5))
# for i in range(10):
#     plt.subplot(2, 5, i + 1)
#     plt.imshow(x[i], cmap='gray')
#     plt.title(f"Label: {y[i]}")
#     plt.axis('off')
#
# plt.tight_layout()
# plt.show()

# MNIST preprocessing
# NOTE(review): shuffle buffer (10000) is smaller than the 60k training set,
# so shuffling is only approximate — acceptable for a demo.
train = train.shuffle(10000)  # shuffles samples; image->label pairing is preserved
def preprocess(x, y, num_classes=10):
    """Normalize one image/label pair for training.

    Args:
        x: uint8 image tensor (28x28 for MNIST); scaled to [0, 1] float32
           and flattened to a [784] vector.
        y: integer class label; converted to a float one-hot vector.
        num_classes: depth of the one-hot encoding. Defaults to 10, so
            existing ``Dataset.map(preprocess)`` callers are unchanged.

    Returns:
        Tuple ``(x, y)`` ready to feed a dense network.
    """
    x = tf.cast(x, dtype=tf.float32) / 255.  # scale pixels to [0, 1]
    x = tf.reshape(x, [28 * 28])             # flatten to shape [784]
    y = tf.cast(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=num_classes)     # one-hot encode the label
    return x, y

# Apply the preprocessing function element-wise to both splits.
train = train.map(preprocess)
test = test.map(preprocess)

# Group samples into mini-batches of 128 for training/evaluation.
batch_size = 128
train = train.batch(batch_size)
test = test.batch(batch_size)

# NOTE(review): disabled scratch code experimenting with Keras model-building
# APIs (functional Input/Dense and several Sequential variants). Kept as a
# bare string literal so it never executes; safe to delete.
'''
# 构建网络
# 构建输入层
x = tf.keras.Input(shape=(32,))
y = tf.keras.layers.Dense(16, activation='softmax')(x)
model = tf.keras.Model(x, y)
print(model)

model = tf.keras.Sequential()
# 展平为一维数组
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# 构建隐藏层
# 构建单个全连接层
model = tf.keras.models.Sequential()
# 输入矩阵的大小为 (None, 16)
model.add(tf.keras.Input(shape=(16,)))
model.add(tf.keras.layers.Dense(32, activation='relu'))
# 输出的大小为 (None, 32)
print('输出大小：', model.output_shape)

# 构建多个全连接层
model = tf.keras.models.Sequential()
# 输入矩阵的大小为 (None, 1)
model.add(tf.keras.Input(shape=(1,)))
# 定义第一个全连接层
model.add(tf.keras.layers.Dense(5, activation='sigmoid'))
# 定义第二个全连接层
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

# 构建输出层
# 使用tanh激活函数
x = tf.linspace(-6., 6., 10)
tf.tanh(x)  # tanh激活函数

model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=(10,)))
# 定义全连接层
model.add(tf.keras.layers.Dense(12, activation='relu'))
# 输出层
model.add(tf.keras.layers.Dense(1, activation='softmax'))
'''

# Classifier: 784 -> 512 -> 256 -> 128 -> 10 MLP. ReLU hidden layers with
# dropout regularization, softmax output over the 10 digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(512, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()

# Compile: Adam optimizer; categorical cross-entropy matches the one-hot labels.
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])

# Train the model.
# NOTE(review): the test split is passed as validation_data, so "validation"
# metrics are computed on the test set — fine for a demo, but it leaks the
# test set into any epoch/hyperparameter selection. Prefer a held-out split.
history = model.fit(train,epochs=10,validation_data=test)

# Final evaluation on the (already-seen-as-validation) test set.
test_loss, test_acc = model.evaluate(test)
print(f'Test accuracy: {test_acc:.4f}')

# Plot accuracy and loss curves for the training run, side by side.
plt.figure(figsize=(12, 4))

curve_specs = [
    ('accuracy', 'val_accuracy', 'Model Accuracy', 'Accuracy',
     'Training Accuracy', 'Validation Accuracy'),
    ('loss', 'val_loss', 'Model Loss', 'Loss',
     'Training Loss', 'Validation Loss'),
]
for panel, (train_key, val_key, title, ylabel, train_lbl, val_lbl) in enumerate(curve_specs, start=1):
    plt.subplot(1, 2, panel)
    plt.plot(history.history[train_key], label=train_lbl)
    plt.plot(history.history[val_key], label=val_lbl)
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    plt.legend()

plt.tight_layout()
plt.show()

# Persist the trained model (legacy HDF5 format, per the .h5 extension).
model.save('mnist_model.h5')
print("Model saved as mnist_model.h5")

# Reload to demonstrate the save/load round trip before predicting.
loaded_model = models.load_model('mnist_model.h5')

# Grab one preprocessed batch from the test set and run inference on it.
test_images, test_labels = next(iter(test))
predictions = loaded_model.predict(test_images)

# Visualize the first 12 predictions alongside their ground-truth labels.
plt.figure(figsize=(12, 8))
for i in range(12):
    plt.subplot(3, 4, i + 1)
    # Inputs were flattened to [784] by preprocess(); restore 28x28 for display.
    img = test_images[i].numpy().reshape(28, 28)
    plt.imshow(img, cmap='gray')
    predicted_label = np.argmax(predictions[i])
    # BUG FIX: preprocess() one-hot encoded the labels, so test_labels[i] is a
    # 10-element vector — argmax recovers the integer digit for the title
    # (previously the raw array was printed).
    true_label = np.argmax(test_labels[i].numpy())
    plt.title(f'True: {true_label}, Pred: {predicted_label}')
    plt.axis('off')

plt.tight_layout()
plt.show()
