import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader

# Set random seeds so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)

# Datasets to run, and per-dataset training epoch budgets / shared hyperparameters
DATASETS = ['FashionMNIST', 'CIFAR10']
EPOCHS = {
    'FashionMNIST': 15,
    'CIFAR10': 30
}
BATCH_SIZE = 64
LEARNING_RATE = 0.001

# Human-readable class names, indexed by label id (0-9) for each dataset
FASHION_MNIST_CLASSES = [
    'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
    'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'
]
CIFAR10_CLASSES = [
    'airplane', 'automobile', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck'
]


# Data preprocessing
def get_transform(dataset_name):
    """Return the ToTensor + Normalize pipeline for the given dataset.

    'FashionMNIST' gets its published single-channel mean/std; any other
    name falls through to the 3-channel 0.5/0.5 CIFAR10 normalization.
    """
    if dataset_name == 'FashionMNIST':
        mean, std = (0.2860,), (0.3530,)
    else:  # CIFAR10 (default for any non-FashionMNIST name)
        mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])


# Load a dataset and wrap it in DataLoaders
def load_dataset(dataset_name):
    """Build train/test DataLoaders for a named torchvision dataset.

    Args:
        dataset_name: attribute name on ``torchvision.datasets``
            (e.g. 'FashionMNIST', 'CIFAR10').

    Returns:
        (train_loader, test_loader, train_dataset, test_dataset)
    """
    transform = get_transform(dataset_name)
    dataset_class = getattr(datasets, dataset_name)

    train_dataset = dataset_class('./data', train=True, download=True, transform=transform)
    # Bug fix: the test split also needs download=True — without it a fresh
    # environment raises RuntimeError ("Dataset not found") before the first
    # evaluation, since only the train split was fetched above.
    test_dataset = dataset_class('./data', train=False, download=True, transform=transform)

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    # Larger eval batches are fine: no gradients are stored during evaluation.
    test_loader = DataLoader(test_dataset, batch_size=1000)

    return train_loader, test_loader, train_dataset, test_dataset


# A simple three-layer fully connected network
class NeuralNetwork(nn.Module):
    """MLP: flatten -> Linear -> ReLU -> Linear -> ReLU -> Linear (logits)."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetwork, self).__init__()
        # Attribute names (fc1/fc2/fc3/relu) are kept stable so saved
        # state_dicts remain loadable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Collapse everything after the batch dimension into one vector.
        flat = torch.flatten(x, 1)
        hidden = self.relu(self.fc1(flat))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)  # raw logits; CrossEntropyLoss applies softmax


# Training loop for a single epoch
def train(model, train_loader, criterion, optimizer, epoch, device):
    """Run one training epoch.

    Returns:
        (mean batch loss, accuracy in percent) over the whole epoch.
    """
    model.train()
    running_loss = 0.0
    hits, seen = 0, 0

    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        logits = model(inputs)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        hits += (logits.argmax(dim=1) == labels).sum().item()
        seen += labels.size(0)

        # Periodic progress report (every 100 batches, including batch 0).
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}')

    return running_loss / len(train_loader), 100. * hits / seen


# Evaluation on the held-out test set (renamed from the original `test`)
def evaluate(model, test_loader, criterion, device):
    """Evaluate the model without gradient tracking.

    Returns:
        (mean batch loss, accuracy in percent) over the test set.
    """
    model.eval()
    running_loss = 0.0
    correct = 0

    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            running_loss += criterion(logits, labels).item()
            # Count label matches directly; equivalent to the keepdim/view_as idiom.
            correct += (logits.argmax(dim=1) == labels).sum().item()

    test_loss = running_loss / len(test_loader)
    accuracy = 100. * correct / len(test_loader.dataset)
    print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')

    return test_loss, accuracy


# Plot loss/accuracy curves across epochs
def visualize_training(dataset_name, train_losses, train_accuracies, test_losses, test_accuracies):
    """Plot per-epoch loss and accuracy curves, save as PNG, and display."""
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(12, 4))

    ax_loss.plot(train_losses, label='Train Loss')
    ax_loss.plot(test_losses, label='Test Loss')
    ax_loss.set_xlabel('Epoch')
    ax_loss.set_ylabel('Loss')
    ax_loss.legend()
    ax_loss.set_title(f'{dataset_name} Training and Test Loss')

    ax_acc.plot(train_accuracies, label='Train Accuracy')
    ax_acc.plot(test_accuracies, label='Test Accuracy')
    ax_acc.set_xlabel('Epoch')
    ax_acc.set_ylabel('Accuracy (%)')
    ax_acc.legend()
    ax_acc.set_title(f'{dataset_name} Training and Test Accuracy')

    fig.tight_layout()
    fig.savefig(f'{dataset_name}_training_metrics.png')
    plt.show()


# Show model predictions on sample test images
def visualize_predictions(model, test_loader, classes, dataset_name, device):
    """Show the first 10 test images with predicted vs. true labels.

    Titles are green for correct predictions and red for wrong ones.
    Assumes the test loader's first batch holds at least 10 samples.
    """
    model.eval()
    batch_images, batch_labels = next(iter(test_loader))
    batch_images, batch_labels = batch_images.to(device), batch_labels.to(device)

    with torch.no_grad():
        preds = model(batch_images).argmax(dim=1)

    plt.figure(figsize=(15, 6))
    for idx in range(10):
        plt.subplot(2, 5, idx + 1)
        if dataset_name == 'FashionMNIST':
            plt.imshow(batch_images[idx].cpu().squeeze().numpy(), cmap='gray')
        else:
            img = batch_images[idx].cpu().numpy().transpose((1, 2, 0))
            img = img * 0.5 + 0.5  # undo the (0.5, 0.5) normalization for display
            plt.imshow(img)
        is_correct = preds[idx] == batch_labels[idx]
        plt.title(f'Pred: {classes[preds[idx].item()]}, True: {classes[batch_labels[idx].item()]}',
                  color='green' if is_correct else 'red')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig(f'{dataset_name}_predictions.png')
    plt.show()


# Entry point
def main():
    """Train and evaluate the MLP on each configured dataset, then plot results."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    # Flattened input size and label names for each supported dataset.
    dataset_config = {
        'FashionMNIST': (28 * 28, FASHION_MNIST_CLASSES),
        'CIFAR10': (32 * 32 * 3, CIFAR10_CLASSES),
    }

    for dataset_name in DATASETS:
        print(f'\n=== Training on {dataset_name} ===')

        train_loader, test_loader, _, _ = load_dataset(dataset_name)
        input_size, classes = dataset_config[dataset_name]
        num_classes = len(classes)

        # Fresh model / loss / optimizer per dataset.
        model = NeuralNetwork(input_size, 128, num_classes).to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

        # Collect per-epoch metrics for plotting.
        history = {'train_loss': [], 'train_acc': [], 'test_loss': [], 'test_acc': []}
        for epoch in range(1, EPOCHS[dataset_name] + 1):
            tr_loss, tr_acc = train(model, train_loader, criterion, optimizer, epoch, device)
            te_loss, te_acc = evaluate(model, test_loader, criterion, device)
            history['train_loss'].append(tr_loss)
            history['train_acc'].append(tr_acc)
            history['test_loss'].append(te_loss)
            history['test_acc'].append(te_acc)

        # Visualize training curves and sample predictions.
        visualize_training(dataset_name, history['train_loss'], history['train_acc'],
                           history['test_loss'], history['test_acc'])
        visualize_predictions(model, test_loader, classes, dataset_name, device)


if __name__ == '__main__':
    main()