import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import layers
import os
import warnings

warnings.filterwarnings('ignore')  # suppress all warnings (silences noisy TF deprecation output)

# Configure matplotlib to render CJK (Chinese) text in titles/labels
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly with CJK fonts

# Fix random seeds for reproducible runs
tf.random.set_seed(42)
np.random.seed(42)

# Hyperparameters -- deliberately small for a quick demo run
BUFFER_SIZE = 100        # shuffle buffer; sized to the 100-image training subset
BATCH_SIZE = 32          # small batch size
NOISE_DIM = 100          # dimensionality of the generator's latent input
EPOCHS = 2000
NUM_EXAMPLES_TO_GENERATE = 16  # fixed samples visualized during/after training

# Load and preprocess MNIST -- keep only the first 100 images
(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()
# BUG FIX: was train_images[:10]; the comments, BUFFER_SIZE, plot titles and the
# final print all state 100 images, so take 100 as intended.
train_images = train_images[:100]
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # scale pixels to [-1, 1] to match the generator's tanh output
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

print(f"使用图片数量: {len(train_images)}")
print(f"批次大小: {BATCH_SIZE}")
print(f"训练轮数: {EPOCHS}")

# Simplified generator: 100-d noise -> dense 7x7x128 feature map -> two
# stride-2 transposed convolutions up to a 28x28x1 image in [-1, 1].
generator = tf.keras.Sequential()
generator.add(layers.Dense(7 * 7 * 128, use_bias=False, input_shape=(NOISE_DIM,)))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
generator.add(layers.Reshape((7, 7, 128)))
# Upsample 7x7 -> 14x14
generator.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
generator.add(layers.BatchNormalization())
generator.add(layers.LeakyReLU())
# Upsample 14x14 -> 28x28; tanh keeps output pixels in [-1, 1]
generator.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))

# Simplified discriminator: two stride-2 convolutions with dropout, then a
# single raw logit (no sigmoid -- the loss uses from_logits=True).
discriminator = tf.keras.Sequential()
discriminator.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
discriminator.add(layers.LeakyReLU())
discriminator.add(layers.Dropout(0.3))
discriminator.add(layers.Flatten())
discriminator.add(layers.Dense(1))

# Loss function and optimizers.
# from_logits=True because the discriminator's final Dense(1) emits raw logits.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)


def discriminator_loss(real_output, fake_output):
    """Discriminator loss: push real logits toward 1 and fake logits toward 0.

    Both terms use the module-level `cross_entropy` (binary CE on raw logits);
    the returned value is their sum.
    """
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake


def generator_loss(fake_output):
    """Generator loss: reward fake samples the discriminator scores as real (1)."""
    real_labels = tf.ones_like(fake_output)
    return cross_entropy(real_labels, fake_output)


# Fixed noise vectors, reused at every visualization step so generated samples
# are directly comparable across training snapshots
seed = tf.random.normal([NUM_EXAMPLES_TO_GENERATE, NOISE_DIM])

# Per-epoch average losses, collected for the loss curves plotted after training
gen_losses = []
disc_losses = []

print("开始训练...")
# Training loop: for each batch, update the generator and discriminator
# simultaneously from a single pair of forward passes.
for epoch in range(EPOCHS):
    epoch_gen_loss = []
    epoch_disc_loss = []

    for image_batch in train_dataset:
        # One latent vector per real image (handles a smaller final batch)
        noise = tf.random.normal([image_batch.shape[0], NOISE_DIM])

        # Two tapes: each network gets gradients of its own loss only
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generate fake images from the noise batch
            generated_images = generator(noise, training=True)

            # Discriminator logits for real and generated images
            real_output = discriminator(image_batch, training=True)
            fake_output = discriminator(generated_images, training=True)

            # Standard GAN losses (binary cross-entropy on logits)
            gen_loss = generator_loss(fake_output)
            disc_loss = discriminator_loss(real_output, fake_output)

        # Gradients of each loss w.r.t. the corresponding network's weights
        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

        # Apply both updates
        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

        epoch_gen_loss.append(gen_loss.numpy())
        epoch_disc_loss.append(disc_loss.numpy())

    # Record per-epoch mean losses for the curves plotted after training
    avg_gen_loss = np.mean(epoch_gen_loss)
    avg_disc_loss = np.mean(epoch_disc_loss)
    gen_losses.append(avg_gen_loss)
    disc_losses.append(avg_disc_loss)

    # Visualize progress on the first epoch and every 20th epoch thereafter
    if (epoch + 1) % 20 == 0 or epoch == 0:
        print(f'Epoch {epoch + 1}/{EPOCHS}, 生成器损失: {avg_gen_loss:.4f}, 判别器损失: {avg_disc_loss:.4f}')

        # Sample from the fixed seed; training=False uses BatchNorm's moving stats
        predictions = generator(seed, training=False)

        # Side-by-side figure: generated samples on top, real samples below
        plt.figure(figsize=(10, 4))

        # Top row: 4 generated samples, rescaled from [-1, 1] back to [0, 255]
        for i in range(4):
            plt.subplot(2, 4, i + 1)
            plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
            plt.title(f'生成样本 {i + 1}')
            plt.axis('off')

        # Bottom row: 4 real training samples for comparison
        real_samples = train_images[:4]
        for i in range(4):
            plt.subplot(2, 4, i + 5)
            plt.imshow(real_samples[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
            plt.title(f'真实样本 {i + 1}')
            plt.axis('off')

        plt.suptitle(f'第 {epoch + 1} 轮训练结果 (使用100张MNIST图片)', fontsize=14)
        plt.tight_layout()
        plt.show()

# Plot the per-epoch average loss curves collected during training
plt.figure(figsize=(10, 6))
plt.plot(gen_losses, label='生成器损失', linewidth=2)
plt.plot(disc_losses, label='判别器损失', linewidth=2)
plt.xlabel('训练轮数')
plt.ylabel('损失值')
plt.title('DCGAN训练损失曲线 (使用100张图片)')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()

# Final result: all 16 samples generated from the fixed seed
print("训练完成！最终生成效果：")
final_predictions = generator(seed, training=False)
plt.figure(figsize=(8, 8))
for i in range(final_predictions.shape[0]):
    plt.subplot(4, 4, i + 1)
    # rescale [-1, 1] -> [0, 255] for display
    plt.imshow(final_predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
    plt.axis('off')
plt.suptitle('DCGAN最终生成结果 (使用100张MNIST图片训练)', fontsize=14)
plt.tight_layout()
plt.show()

# Show real training samples as a visual reference
print("\n真实MNIST样本对比：")
plt.figure(figsize=(8, 8))
real_samples = train_images[:16]
# iterate over shape[0] (not a fixed 16) in case fewer images were loaded
for i in range(real_samples.shape[0]):
    plt.subplot(4, 4, i + 1)
    plt.imshow(real_samples[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
    plt.axis('off')
plt.suptitle('真实MNIST样本 (用于训练)', fontsize=14)
plt.tight_layout()
plt.show()

# Print both model architectures
print("\n生成器模型结构:")
generator.summary()
print("\n判别器模型结构:")
discriminator.summary()

print(f"\n训练完成！总共使用了 {len(train_images)} 张图片进行训练。")
print("可以看到生成器逐渐学习到了数字的基本形状。")