import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

# Fix the random seeds (PyTorch and NumPy) so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)


# Neural network model definition
class NeuralNetwork(nn.Module):
    """A simple 3-layer fully-connected classifier producing raw logits.

    Args:
        input_size: Number of features after flattening (784 for 28x28
            FashionMNIST, 3072 for 32x32x3 CIFAR-10).
        hidden_size: Width of both hidden layers.
        num_classes: Number of output classes.
        is_cifar: Unused; accepted only so existing callers keep working.
    """

    def __init__(self, input_size, hidden_size, num_classes, is_cifar=False):
        super().__init__()
        # Collapses any (N, ...) input down to (N, input_size)
        self.flatten = nn.Flatten()
        # Three linear layers; one stateless ReLU module is shared between them
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Return class logits of shape (N, num_classes); no softmax applied."""
        flat = self.flatten(x)
        hidden = self.relu(self.fc1(flat))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)


# Model training loop
def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs, device):
    """Train *model* for *num_epochs* epochs, evaluating on *test_loader* after each.

    Returns four per-epoch lists: train losses, train accuracies (%),
    test losses, test accuracies (%).
    """
    history = {"train_loss": [], "train_acc": [], "test_loss": [], "test_acc": []}

    for epoch in range(num_epochs):
        # --- training pass ---
        model.train()
        running_loss, n_correct, n_seen = 0.0, 0, 0

        for batch, targets in tqdm(train_loader, desc=f'Epoch {epoch + 1}/{num_epochs}'):
            batch, targets = batch.to(device), targets.to(device)

            optimizer.zero_grad()
            logits = model(batch)
            batch_loss = criterion(logits, targets)
            batch_loss.backward()
            optimizer.step()

            running_loss += batch_loss.item()
            n_seen += targets.size(0)
            n_correct += (logits.argmax(dim=1) == targets).sum().item()

        # Mean loss over batches; accuracy over all samples
        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100.0 * n_correct / n_seen
        history["train_loss"].append(epoch_train_loss)
        history["train_acc"].append(epoch_train_acc)

        # --- evaluation pass (no gradients) ---
        model.eval()
        running_loss, n_correct, n_seen = 0.0, 0, 0

        with torch.no_grad():
            for batch, targets in test_loader:
                batch, targets = batch.to(device), targets.to(device)
                logits = model(batch)
                running_loss += criterion(logits, targets).item()
                n_seen += targets.size(0)
                n_correct += (logits.argmax(dim=1) == targets).sum().item()

        epoch_test_loss = running_loss / len(test_loader)
        epoch_test_acc = 100.0 * n_correct / n_seen
        history["test_loss"].append(epoch_test_loss)
        history["test_acc"].append(epoch_test_acc)

        print(
            f'Epoch {epoch + 1}/{num_epochs}, Train Loss: {epoch_train_loss:.4f}, Train Acc: {epoch_train_acc:.2f}%, Test Loss: {epoch_test_loss:.4f}, Test Acc: {epoch_test_acc:.2f}%')

    return history["train_loss"], history["train_acc"], history["test_loss"], history["test_acc"]


# Visualize the training process
def plot_training_history(train_losses, train_accuracies, test_losses, test_accuracies, dataset_name):
    """Plot per-epoch loss and accuracy curves, save a PNG, then show the figure."""
    plt.figure(figsize=(12, 5))

    # (subplot index, train series, test series, title suffix, y label,
    #  train legend label, test legend label)
    panels = [
        (1, train_losses, test_losses, 'Loss Curves', 'Loss',
         'Train Loss', 'Test Loss'),
        (2, train_accuracies, test_accuracies, 'Accuracy Curves', 'Accuracy (%)',
         'Train Accuracy', 'Test Accuracy'),
    ]
    for idx, train_series, test_series, title, ylabel, train_label, test_label in panels:
        plt.subplot(1, 2, idx)
        plt.plot(train_series, label=train_label)
        plt.plot(test_series, label=test_label)
        plt.title(f'{dataset_name} - {title}')
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    plt.savefig(f'{dataset_name}_training_history.png')
    plt.show()


# Visualize prediction results
def visualize_predictions(model, test_loader, class_names, device, dataset_name, num_samples=10):
    """Show the first *num_samples* test images with predicted vs. true labels,
    save the grid as a PNG, then display it."""
    model.eval()
    batch_images, batch_labels = next(iter(test_loader))
    batch_images = batch_images[:num_samples].to(device)
    batch_labels = batch_labels[:num_samples].to(device)

    with torch.no_grad():
        preds = model(batch_images).max(1)[1]

    np_images = batch_images.cpu().numpy()
    np_preds = preds.cpu().numpy()
    np_labels = batch_labels.cpu().numpy()

    cmap = 'gray' if dataset_name == 'FashionMNIST' else None

    plt.figure(figsize=(15, 6))
    for idx in range(num_samples):
        plt.subplot(2, 5, idx + 1)

        # Handle each dataset's image layout
        if dataset_name == 'CIFAR-10':
            # 3-channel image: move channels last (H, W, C) and map the
            # normalized [-1, 1] pixel range back to [0, 1]
            img = (np.transpose(np_images[idx], (1, 2, 0)) + 1) / 2.0
        else:
            # FashionMNIST is single-channel; drop the channel axis
            img = np_images[idx].squeeze()

        plt.imshow(img, cmap=cmap)
        plt.title(f'Pred: {class_names[np_preds[idx]]}\nTrue: {class_names[np_labels[idx]]}')
        plt.axis('off')

    plt.tight_layout()
    plt.savefig(f'{dataset_name}_predictions.png')
    plt.show()


# Run the full pipeline (load -> train -> save -> visualize) for one dataset.
def _run_experiment(dataset_name, dataset_cls, transform, class_names,
                    input_size, hidden_size, model_path,
                    batch_size, learning_rate, num_epochs, device):
    """Train and evaluate one MLP on one torchvision dataset, end to end.

    Args:
        dataset_name: Display name used in prints, plot titles and filenames.
        dataset_cls: torchvision dataset class (e.g. FashionMNIST, CIFAR10).
        transform: Preprocessing transform applied to every sample.
        class_names: Human-readable class labels, indexed by target id.
        input_size: Flattened input dimension for the MLP.
        hidden_size: Hidden-layer width for the MLP.
        model_path: Path where the trained state_dict is saved.
        batch_size / learning_rate / num_epochs: Training hyperparameters.
        device: torch.device to train on.
    """
    print(f"\n=== 处理{dataset_name}数据集 ===")

    # Load the dataset (downloaded to ./data on first run)
    train_dataset = dataset_cls(
        root='./data', train=True, download=True, transform=transform)
    test_dataset = dataset_cls(
        root='./data', train=False, download=True, transform=transform)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=batch_size, shuffle=False)

    # Build model, loss and optimizer
    model = NeuralNetwork(input_size=input_size, hidden_size=hidden_size, num_classes=10).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Train
    print(f"训练{dataset_name}模型...")
    train_losses, train_accuracies, test_losses, test_accuracies = train_model(
        model, train_loader, test_loader, criterion, optimizer, num_epochs, device)

    # Persist the trained weights
    torch.save(model.state_dict(), model_path)

    # Visualize training history and sample predictions
    plot_training_history(train_losses, train_accuracies, test_losses, test_accuracies,
                          dataset_name)
    visualize_predictions(model, test_loader, class_names, device, dataset_name)


# Main function: run the identical pipeline on both datasets
def main():
    """Train the same 3-layer MLP architecture on FashionMNIST and CIFAR-10."""
    # Prefer GPU when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Shared training hyperparameters
    batch_size = 128
    learning_rate = 0.001
    num_epochs = 20

    # FashionMNIST: 1-channel 28x28 images -> 784 inputs
    _run_experiment(
        dataset_name='FashionMNIST',
        dataset_cls=torchvision.datasets.FashionMNIST,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ]),
        class_names=['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                     'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'],
        input_size=784,
        hidden_size=128,
        model_path='fashion_mnist_model.pth',
        batch_size=batch_size, learning_rate=learning_rate,
        num_epochs=num_epochs, device=device)

    # CIFAR-10: 3-channel 32x32 images -> 3072 inputs, wider hidden layer
    _run_experiment(
        dataset_name='CIFAR-10',
        dataset_cls=torchvision.datasets.CIFAR10,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]),
        class_names=['airplane', 'automobile', 'bird', 'cat', 'deer',
                     'dog', 'frog', 'horse', 'ship', 'truck'],
        input_size=3072,
        hidden_size=256,
        model_path='cifar10_model.pth',
        batch_size=batch_size, learning_rate=learning_rate,
        num_epochs=num_epochs, device=device)


if __name__ == "__main__":
    main()
