import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import time
import json
from collections import defaultdict
from torchvision import datasets, transforms




def get_data_loaders(batch_size=128):
    """Build MNIST train/test DataLoaders with standard normalization.

    Args:
        batch_size: batch size used for both loaders.

    Returns:
        (train_loader, test_loader) tuple; the training loader shuffles,
        the test loader does not.
    """
    # Canonical MNIST mean/std normalization.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    train_set = datasets.MNIST('data', train=True, download=True, transform=preprocess)
    test_set = datasets.MNIST('data', train=False, transform=preprocess)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader


# Load the data once at module level and report basic dataset statistics.
# NOTE(review): this runs at import time and downloads MNIST on first use.
train_loader, test_loader = get_data_loaders()
print(f"训练集样本数: {len(train_loader.dataset)}")
print(f"测试集样本数: {len(test_loader.dataset)}")
print(f"图像尺寸: {train_loader.dataset[0][0].shape}")





def visualize_samples(loader, num_samples=10):
    """Display the first `num_samples` images of one batch with their labels.

    Args:
        loader: DataLoader yielding (images, labels) batches.
        num_samples: number of images to show; clamped to the batch size so
            a batch smaller than `num_samples` cannot raise an IndexError.
    """
    images, labels = next(iter(loader))
    # Guard: never index past the end of the batch.
    num_samples = min(num_samples, len(images))

    plt.figure(figsize=(15, 3))
    for i in range(num_samples):
        plt.subplot(1, num_samples, i + 1)
        plt.imshow(images[i].squeeze(), cmap='gray')
        plt.title(f'Label: {labels[i].item()}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()


# Rebuild the loaders (fresh shuffle) and preview one batch of samples.
train_loader, test_loader = get_data_loaders()
visualize_samples(train_loader)






class BasicMLP(nn.Module):
    """Basic multi-layer perceptron for flattened 28x28 MNIST images.

    Architecture: [Linear -> activation -> Dropout(0.2)] per hidden layer,
    followed by a final Linear projection to `output_size` logits.
    """

    def __init__(self, input_size=784, hidden_sizes=(128,), output_size=10, activation='relu'):
        """
        Args:
            input_size: flattened input dimension (28*28 for MNIST).
            hidden_sizes: sequence of hidden-layer widths. The default is a
                tuple rather than a list to avoid a mutable default argument.
            output_size: number of classes (logit dimension).
            activation: 'relu', 'sigmoid' or 'tanh'; any other value adds no
                activation (purely linear), matching the original behavior.
        """
        super().__init__()

        # Dispatch table instead of an if/elif chain over activation names.
        activation_map = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}

        layers = []
        prev_size = input_size

        for hidden_size in hidden_sizes:
            layers.append(nn.Linear(prev_size, hidden_size))
            if activation in activation_map:
                layers.append(activation_map[activation]())
            layers.append(nn.Dropout(0.2))  # dropout to reduce overfitting
            prev_size = hidden_size

        layers.append(nn.Linear(prev_size, output_size))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Flatten (N, ...) input to (N, input_size) and return raw logits."""
        x = x.view(x.size(0), -1)
        return self.network(x)






class ModelTrainer:
    """Train/evaluate harness for a classifier with a logits-based loss.

    Moves the model to GPU when available and records per-epoch loss and
    accuracy history (train and test) for later plotting via plot_results().
    """

    def __init__(self, model, optimizer, criterion):
        """
        Args:
            model: nn.Module producing class logits of shape (N, num_classes).
            optimizer: torch optimizer constructed over model.parameters().
            criterion: loss callable taking (logits, targets).
        """
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        # Pick the device once; batches are moved per step in the loops below.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        # Per-epoch history, appended by train().
        self.train_losses = []
        self.train_accuracies = []
        self.test_losses = []
        self.test_accuracies = []

    def train_epoch(self, train_loader):
        """Run one training epoch.

        Returns:
            (mean batch loss, accuracy in percent) over the full epoch.
        """
        self.model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        # enumerate() dropped: the batch index was never used.
        for data, target in train_loader:
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()

            running_loss += loss.item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

        # Mean of per-batch losses (not sample-weighted), as before.
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100. * correct / total

        return epoch_loss, epoch_acc

    def test(self, test_loader):
        """Evaluate the model without gradient tracking.

        Returns:
            (mean batch loss, accuracy in percent) over test_loader.
        """
        self.model.eval()
        test_loss = 0
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                test_loss += self.criterion(output, target).item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()

        test_loss /= len(test_loader)
        test_acc = 100. * correct / total

        return test_loss, test_acc

    def train(self, train_loader, test_loader, epochs=10):
        """Train for `epochs` epochs, evaluating and logging after each one."""
        print(f"{'Epoch':^6} | {'Train Loss':^12} | {'Train Acc':^10} | {'Test Loss':^10} | {'Test Acc':^10}")
        print("-" * 70)

        for epoch in range(1, epochs + 1):
            train_loss, train_acc = self.train_epoch(train_loader)
            test_loss, test_acc = self.test(test_loader)

            self.train_losses.append(train_loss)
            self.train_accuracies.append(train_acc)
            self.test_losses.append(test_loss)
            self.test_accuracies.append(test_acc)

            print(f"{epoch:^6} | {train_loss:^12.4f} | {train_acc:^10.2f}% | {test_loss:^10.4f} | {test_acc:^10.2f}%")

    def plot_results(self, title):
        """Plot a 2x2 grid: train/test loss and train/test accuracy curves."""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

        # Loss curves.
        ax1.plot(self.train_losses, label='Training Loss')
        ax1.set_title(f'{title} - Training Loss')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.legend()
        ax1.grid(True)
        ax2.plot(self.test_losses, label='Test Loss', color='orange')
        ax2.set_title(f'{title} - Test Loss')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Loss')
        ax2.legend()
        ax2.grid(True)

        # Accuracy curves.
        ax3.plot(self.train_accuracies, label='Training Accuracy', color='green')
        ax3.set_title(f'{title} - Training Accuracy')
        ax3.set_xlabel('Epoch')
        ax3.set_ylabel('Accuracy (%)')
        ax3.legend()
        ax3.grid(True)
        ax4.plot(self.test_accuracies, label='Test Accuracy', color='red')
        ax4.set_title(f'{title} - Test Accuracy')
        ax4.set_xlabel('Epoch')
        ax4.set_ylabel('Accuracy (%)')
        ax4.legend()
        ax4.grid(True)

        plt.tight_layout()
        plt.show()





def experiment_basic_mlp():
    """Experiment 1: train a single-hidden-layer MLP and plot its curves."""
    print("=== 实验1: 基础单隐藏层MLP ===")
    loaders = get_data_loaders(batch_size=128)
    mlp = BasicMLP(hidden_sizes=[128], activation='relu')
    runner = ModelTrainer(
        mlp,
        optim.Adam(mlp.parameters(), lr=0.001),
        nn.CrossEntropyLoss(),
    )
    runner.train(loaders[0], loaders[1], epochs=15)
    runner.plot_results("Basic MLP (128 hidden units)")
    return runner


# Run experiment 1 at import time; keep the trainer for later inspection.
basic_trainer = experiment_basic_mlp()




def experiment_network_structures():
    """Experiment 2: compare MLP widths/depths after a short 5-epoch run each.

    Returns:
        dict mapping structure label -> final-epoch train/test loss and accuracy.
    """
    print("=== 实验2: 网络结构影响 ===")

    structures = {
        'Small (64)': [64],
        'Medium (128)': [128],
        'Large (256)': [256],
        'Deep (64-32)': [64, 32],
        'Deep (128-64)': [128, 64],
        'Deep (256-128)': [256, 128]
    }

    train_loader, test_loader = get_data_loaders(batch_size=128)
    results = {}

    for name, hidden_sizes in structures.items():
        print(f"\n--- 测试网络结构: {name} ---")

        model = BasicMLP(hidden_sizes=hidden_sizes, activation='relu')
        trainer = ModelTrainer(
            model,
            optim.Adam(model.parameters(), lr=0.001),
            nn.CrossEntropyLoss(),
        )

        # Short 5-epoch run for comparison; only the final epoch's metrics
        # are kept, so record them once after the loop finishes.
        for _ in range(5):
            train_loss, train_acc = trainer.train_epoch(train_loader)
            test_loss, test_acc = trainer.test(test_loader)

        results[name] = {
            'train_acc': train_acc,
            'test_acc': test_acc,
            'train_loss': train_loss,
            'test_loss': test_loss
        }

        print(f"{name}: 训练准确率: {train_acc:.2f}%, 测试准确率: {test_acc:.2f}%")

    # Grouped bar chart of train vs test accuracy per structure.
    names = list(results.keys())
    train_accs = [results[n]['train_acc'] for n in names]
    test_accs = [results[n]['test_acc'] for n in names]

    plt.figure(figsize=(12, 6))
    positions = np.arange(len(names))
    width = 0.35

    plt.bar(positions - width / 2, train_accs, width, label='Training Accuracy', alpha=0.7)
    plt.bar(positions + width / 2, test_accs, width, label='Test Accuracy', alpha=0.7)

    plt.xlabel('Network Structure')
    plt.ylabel('Accuracy (%)')
    plt.title('Network Structure Comparison')
    plt.xticks(positions, names, rotation=45)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

    return results


# Run experiment 2 at import time; keep the per-structure metrics.
structure_results = experiment_network_structures()




def experiment_activation_functions():
    """Experiment 3: compare ReLU/Sigmoid/Tanh on a 128-unit MLP (10 epochs).

    Returns:
        dict mapping activation name -> final accuracy and per-epoch curves.
    """
    print("=== 实验3: 激活函数对比 ===")

    activations = ['relu', 'sigmoid', 'tanh']
    train_loader, test_loader = get_data_loaders(batch_size=128)
    results = {}

    for activation in activations:
        print(f"\n--- 测试激活函数: {activation} ---")

        model = BasicMLP(hidden_sizes=[128], activation=activation)
        trainer = ModelTrainer(
            model,
            optim.Adam(model.parameters(), lr=0.001),
            nn.CrossEntropyLoss(),
        )

        # Track test-set metrics per epoch; the final epoch's accuracy and
        # the complete curves are recorded once after the loop.
        epoch_losses, epoch_accs = [], []
        for _ in range(10):
            trainer.train_epoch(train_loader)
            test_loss, test_acc = trainer.test(test_loader)
            epoch_losses.append(test_loss)
            epoch_accs.append(test_acc)

        results[activation] = {
            'final_acc': test_acc,
            'loss_curve': epoch_losses,
            'acc_curve': epoch_accs
        }

        print(f"{activation}: 最终测试准确率: {test_acc:.2f}%")

    # Side-by-side comparison of test-loss and test-accuracy curves.
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    for activation in activations:
        plt.plot(results[activation]['loss_curve'], label=activation.upper())
    plt.xlabel('Epoch')
    plt.ylabel('Test Loss')
    plt.title('Activation Functions - Loss Curves')
    plt.legend()
    plt.grid(True)

    plt.subplot(1, 2, 2)
    for activation in activations:
        plt.plot(results[activation]['acc_curve'], label=activation.upper())
    plt.xlabel('Epoch')
    plt.ylabel('Test Accuracy (%)')
    plt.title('Activation Functions - Accuracy Curves')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.show()

    return results


# Run experiment 3 at import time; keep the per-activation metrics.
activation_results = experiment_activation_functions()




def experiment_optimizers():
    """Experiment 4: compare SGD / SGD+momentum / Adam / RMSprop (10 epochs each).

    Returns:
        dict mapping optimizer label -> {'final_acc', 'acc_curve'}.
    """
    print("=== 实验4: 优化策略对比 ===")

    # Each entry holds the optimizer class plus its constructor kwargs.
    optimizers_config = {
        'SGD (lr=0.01)': {'optimizer': optim.SGD, 'lr': 0.01},
        'SGD (lr=0.01, momentum=0.9)': {'optimizer': optim.SGD, 'lr': 0.01, 'momentum': 0.9},
        'Adam (lr=0.001)': {'optimizer': optim.Adam, 'lr': 0.001},
        'RMSprop (lr=0.001)': {'optimizer': optim.RMSprop, 'lr': 0.001}
    }

    results = {}
    train_loader, test_loader = get_data_loaders(batch_size=128)

    for name, config in optimizers_config.items():
        print(f"\n--- 测试优化器: {name} ---")
        model = BasicMLP(hidden_sizes=[128], activation='relu')

        # Pass every config entry except 'optimizer' as a keyword argument.
        # This replaces the redundant SGD/momentum special-casing and works
        # for any optimizer/kwarg combination added to the config.
        opt_cls = config['optimizer']
        opt_kwargs = {k: v for k, v in config.items() if k != 'optimizer'}
        optimizer = opt_cls(model.parameters(), **opt_kwargs)

        criterion = nn.CrossEntropyLoss()
        trainer = ModelTrainer(model, optimizer, criterion)
        epoch_accs = []

        for epoch in range(1, 11):
            trainer.train_epoch(train_loader)
            _, test_acc = trainer.test(test_loader)
            epoch_accs.append(test_acc)

        # Record the final-epoch accuracy and the full curve after the run.
        results[name] = {
            'final_acc': test_acc,
            'acc_curve': epoch_accs
        }

        print(f"{name}: 最终测试准确率: {test_acc:.2f}%")
    plt.figure(figsize=(15, 6))

    # Accuracy-curve comparison.
    plt.subplot(1, 2, 1)
    for name in optimizers_config.keys():
        plt.plot(results[name]['acc_curve'], label=name)
    plt.xlabel('Epoch')
    plt.ylabel('Test Accuracy (%)')
    plt.title('Optimizers Comparison - Accuracy Curves')
    plt.legend()
    plt.grid(True)

    # Final-accuracy bar chart with value labels above each bar.
    plt.subplot(1, 2, 2)
    names = list(results.keys())
    accs = [results[name]['final_acc'] for name in names]

    bars = plt.bar(names, accs, alpha=0.7)
    plt.xlabel('Optimizer')
    plt.ylabel('Final Test Accuracy (%)')
    plt.title('Optimizers Comparison - Final Accuracy')
    plt.xticks(rotation=45)

    for bar, acc in zip(bars, accs):
        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.1,
                 f'{acc:.2f}%', ha='center', va='bottom')

    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

    return results


# Run experiment 4 at import time; keep the per-optimizer metrics.
optimizer_results = experiment_optimizers()



def final_experiment():
    """Final experiment: train the best configuration found above.

    Returns:
        (trainer, final test accuracy) tuple.
    """
    print("=== 最终实验: 综合最佳配置 ===")

    train_loader, test_loader = get_data_loaders(batch_size=128)

    # Best configuration from the earlier experiments: a deeper network,
    # Adam, plus weight decay (L2 regularization).
    best_model = BasicMLP(hidden_sizes=[256, 128], activation='relu')
    trainer = ModelTrainer(
        best_model,
        optim.Adam(best_model.parameters(), lr=0.001, weight_decay=1e-4),
        nn.CrossEntropyLoss(),
    )
    trainer.train(train_loader, test_loader, epochs=20)
    trainer.plot_results("Optimized MLP (256-128 hidden units)")

    final_test_loss, final_test_acc = trainer.test(test_loader)
    print(f"\n最终模型性能:")
    print(f"测试损失: {final_test_loss:.4f}")
    print(f"测试准确率: {final_test_acc:.2f}%")

    return trainer, final_test_acc


def error_analysis(model, test_loader):
    """Collect misclassified test samples and a 10x10 confusion matrix.

    Args:
        model: trained classifier producing (N, 10) logits.
        test_loader: DataLoader over the test set.

    Returns:
        (misclassified, confusion_matrix): misclassified is a list of dicts
        with keys 'image' (CPU tensor), 'true', and 'pred';
        confusion_matrix[t, p] counts samples of true label t predicted as p.
    """
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    misclassified = []
    confusion_matrix = torch.zeros(10, 10)

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1)

            # Iterate directly over the indices of wrong predictions rather
            # than re-indexing nonzero() inside a range(sum) loop.
            wrong_indices = (pred != target).nonzero(as_tuple=True)[0]
            for idx in wrong_indices:
                misclassified.append({
                    'image': data[idx].cpu().squeeze(),
                    'true': target[idx].item(),
                    'pred': pred[idx].item()
                })

            # Accumulate the confusion matrix one sample at a time.
            for t, p in zip(target.view(-1), pred.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1

    # Show up to 10 misclassified examples with true vs predicted labels.
    print(f"\n错误分类样本数: {len(misclassified)}")
    plt.figure(figsize=(15, 6))
    for i in range(min(10, len(misclassified))):
        plt.subplot(2, 5, i + 1)
        plt.imshow(misclassified[i]['image'], cmap='gray')
        plt.title(f'True: {misclassified[i]["true"]}, Pred: {misclassified[i]["pred"]}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()

    return misclassified, confusion_matrix


# Train the final model, then inspect its mistakes on the test set.
final_trainer, final_accuracy = final_experiment()
misclassified_samples, conf_matrix = error_analysis(final_trainer.model, test_loader)