import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import time

# Seed both RNGs so results are reproducible across runs.
# (Full determinism on CUDA would additionally require cuDNN settings.)
torch.manual_seed(42)
np.random.seed(42)


# A 3-layer fully-connected classifier for flattened image batches.
class NeuralNetwork(nn.Module):
    """MLP: flatten -> Linear+ReLU+Dropout -> Linear+ReLU+Dropout -> Linear.

    The final layer emits raw class logits (no softmax); pair with
    nn.CrossEntropyLoss.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetwork, self).__init__()
        # Attribute names/order are part of the state_dict layout — keep stable.
        self.layer1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout1 = nn.Dropout(0.2)
        self.layer2 = nn.Linear(hidden_size, hidden_size)
        self.dropout2 = nn.Dropout(0.2)
        self.layer3 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Flatten `x` to (batch, features) and return (batch, num_classes) logits."""
        # Collapse every non-batch dimension into one feature vector.
        out = x.view(x.size(0), -1)
        # Fixed pipeline; ReLU is stateless so the single module is reused.
        pipeline = (
            self.layer1, self.relu, self.dropout1,
            self.layer2, self.relu, self.dropout2,
            self.layer3,
        )
        for stage in pipeline:
            out = stage(out)
        return out


# Optimization loop with a per-epoch evaluation pass.
def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs, device, scheduler=None):
    """Train `model` for `num_epochs`, evaluating on `test_loader` after each epoch.

    Prints per-epoch metrics and returns four parallel lists (one entry
    per epoch): train losses, train accuracies (%), test losses, test
    accuracies (%). `scheduler`, if given, is stepped once per epoch.
    """
    history = {'train_loss': [], 'train_acc': [], 'test_loss': [], 'test_acc': []}

    for epoch in range(num_epochs):
        # --- optimization pass over the training set ---
        model.train()
        running_loss, hits, seen = 0.0, 0, 0

        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)

            optimizer.zero_grad()
            logits = model(batch_x)
            batch_loss = criterion(logits, batch_y)
            batch_loss.backward()
            optimizer.step()

            running_loss += batch_loss.item()
            hits += logits.max(1)[1].eq(batch_y).sum().item()
            seen += batch_y.size(0)

        # Advance the learning-rate schedule once per epoch, if provided.
        if scheduler:
            scheduler.step()

        # Loss is averaged per batch; accuracy is per sample.
        train_loss = running_loss / len(train_loader)
        train_acc = 100.0 * hits / seen
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)

        # --- evaluation pass over the test set (no gradients) ---
        model.eval()
        running_loss, hits, seen = 0.0, 0, 0

        with torch.no_grad():
            for batch_x, batch_y in test_loader:
                batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                logits = model(batch_x)
                running_loss += criterion(logits, batch_y).item()
                hits += logits.max(1)[1].eq(batch_y).sum().item()
                seen += batch_y.size(0)

        test_loss = running_loss / len(test_loader)
        test_acc = 100.0 * hits / seen
        history['test_loss'].append(test_loss)
        history['test_acc'].append(test_acc)

        print(
            f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%')

    return history['train_loss'], history['train_acc'], history['test_loss'], history['test_acc']


# Plot the per-epoch loss and accuracy curves side by side.
def visualize_training(train_losses, train_accs, test_losses, test_accs, dataset_name):
    """Save '<dataset_name>_training_curves.png' and display the figure.

    Left panel: train/test loss per epoch. Right panel: train/test
    accuracy (%) per epoch.
    """
    def _panel(position, train_series, test_series, train_label, test_label,
               ylabel, title):
        # One subplot comparing the train and test series over epochs.
        plt.subplot(1, 2, position)
        plt.plot(train_series, label=train_label)
        plt.plot(test_series, label=test_label)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend()

    plt.figure(figsize=(12, 5))
    _panel(1, train_losses, test_losses, 'Train Loss', 'Test Loss',
           'Loss', f'{dataset_name} - Loss Curves')
    _panel(2, train_accs, test_accs, 'Train Accuracy', 'Test Accuracy',
           'Accuracy (%)', f'{dataset_name} - Accuracy Curves')
    plt.tight_layout()
    plt.savefig(f'{dataset_name}_training_curves.png')
    plt.show()


# Show a grid of test images annotated with predicted vs. true labels.
def visualize_predictions(model, test_loader, classes, device, dataset_name, num_images=16):
    """Plot up to `num_images` test samples with color-coded predictions.

    Titles are green when the prediction matches the label, red otherwise.
    Saves '<dataset_name>_predictions.png' and shows the figure. Handles
    both 1-channel (grayscale) and 3-channel (assumed normalized with
    mean/std 0.5) image batches.
    """
    model.eval()
    images_so_far = 0
    # Round rows up so the grid holds num_images even when it is not a
    # multiple of 4 (the original `num_images // 4` overflowed the grid
    # for e.g. num_images=6, making plt.subplot raise).
    nrows = (num_images + 3) // 4
    plt.figure(figsize=(10, 10))  # was assigned to an unused `fig` local

    def _finalize():
        # Save and display whatever has been drawn so far.
        plt.tight_layout()
        plt.savefig(f'{dataset_name}_predictions.png')
        plt.show()

    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, preds = torch.max(outputs, 1)

            for j in range(images.size(0)):
                images_so_far += 1
                ax = plt.subplot(nrows, 4, images_so_far)
                ax.axis('off')

                # Single-channel batch (e.g. FashionMNIST): plot as grayscale.
                if images.size(1) == 1:
                    img = images.cpu().data[j].numpy().squeeze()
                    ax.imshow(img, cmap='gray')
                # Three-channel batch (e.g. CIFAR-10): undo the 0.5/0.5
                # normalization before plotting.
                else:
                    img = images.cpu().data[j].numpy().transpose((1, 2, 0))
                    mean = np.array([0.5, 0.5, 0.5])
                    std = np.array([0.5, 0.5, 0.5])
                    img = np.clip(std * img + mean, 0, 1)
                    ax.imshow(img)

                ax.set_title(f'Pred: {classes[preds[j]]}\nTrue: {classes[labels[j]]}',
                             color=("green" if preds[j] == labels[j] else "red"))

                if images_so_far == num_images:
                    _finalize()
                    return

    # Loader exhausted before num_images were drawn: still emit the figure
    # (the original silently produced no output in this case).
    if images_so_far > 0:
        _finalize()


# Script entry point: train, evaluate, visualize, and save one model per dataset.
def main():
    """Run the full experiment on FashionMNIST and CIFAR-10."""
    # Prefer the first CUDA device when one is available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Per-dataset hyperparameters and metadata.
    datasets = {
        'FashionMNIST': {
            'transform': transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5,), (0.5,))
            ]),
            'num_classes': 10,
            'input_size': 28 * 28,
            'hidden_size': 256,  # larger hidden layer
            'classes': ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                        'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'],
            'batch_size': 128,
            'num_epochs': 20,
            'lr': 0.001
        },
        'CIFAR10': {
            'transform': transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ]),
            'num_classes': 10,
            'input_size': 32 * 32 * 3,
            'hidden_size': 512,  # larger hidden layer
            'classes': ['airplane', 'automobile', 'bird', 'cat', 'deer',
                        'dog', 'frog', 'horse', 'ship', 'truck'],
            'batch_size': 128,
            'num_epochs': 30,
            'lr': 0.001
        }
    }

    # Train and evaluate on each dataset in turn.
    for dataset_name, config in datasets.items():
        print(f"\n{'=' * 50}\nTraining on {dataset_name} dataset\n{'=' * 50}")

        # Resolve the torchvision dataset class once; both splits use it.
        dataset_cls = (torchvision.datasets.FashionMNIST
                       if dataset_name == 'FashionMNIST'
                       else torchvision.datasets.CIFAR10)
        train_dataset = dataset_cls(root='./data', train=True,
                                    download=True, transform=config['transform'])
        test_dataset = dataset_cls(root='./data', train=False,
                                   download=True, transform=config['transform'])

        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config['batch_size'],
                                                   shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config['batch_size'],
                                                  shuffle=False, num_workers=2)

        # Model, loss, optimizer, and a step-decay LR schedule.
        model = NeuralNetwork(config['input_size'], config['hidden_size'], config['num_classes']).to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=config['lr'])
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)

        # Train and time the run.
        start_time = time.time()
        train_losses, train_accs, test_losses, test_accs = train_model(
            model, train_loader, test_loader, criterion, optimizer, config['num_epochs'], device, scheduler
        )
        training_time = time.time() - start_time
        print(f"Training completed in {training_time:.2f} seconds")

        # Curves, sample predictions, and a checkpoint of the weights.
        visualize_training(train_losses, train_accs, test_losses, test_accs, dataset_name)
        visualize_predictions(model, test_loader, config['classes'], device, dataset_name)
        torch.save(model.state_dict(), f'{dataset_name}_model.pth')


# Run the experiment only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
