import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import StepLR
import math
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
import json
import numpy as np
from itertools import product

class RadixSoftmax(nn.Module):
    """Normalize split-attention logits across the radix dimension.

    With ``radix > 1`` the logits are reshaped to
    (batch, radix, cardinality, -1) and softmax-normalized over the radix
    axis; with ``radix == 1`` a plain sigmoid gate is applied instead.
    """

    def __init__(self, radix, cardinality):
        super(RadixSoftmax, self).__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        batch_size = x.size(0)
        if self.radix == 1:
            # Single split: sigmoid gating, no competition between splits.
            return torch.sigmoid(x)
        # (B, C) -> (B, cardinality, radix, -1) -> (B, radix, cardinality, -1)
        logits = x.view(batch_size, self.cardinality, self.radix, -1)
        logits = logits.permute(0, 2, 1, 3)
        # Softmax over the radix axis, then flatten back to (B, C).
        probs = F.softmax(logits, dim=1)
        return probs.reshape(batch_size, -1)


class SplitAttention(nn.Module):
    """Split-Attention convolution (ResNeSt SplAtConv2d).

    Runs a grouped convolution that produces ``radix`` feature-map splits,
    global-average-pools the sum of the splits, and predicts per-split channel
    attention weights through two 1x1 convolutions followed by a radix-softmax.
    The output is the attention-weighted sum of the splits.

    Notes:
        * ``rectify`` / ``rectify_avg`` are accepted for interface
          compatibility but unused (rectified conv is not implemented here).
        * DropBlock is not implemented: the original code called a
          non-existent ``self.dropblock`` and raised AttributeError whenever
          ``dropblock_prob > 0``. That broken call has been removed, so a
          positive ``dropblock_prob`` is now a documented no-op.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0):
        super(SplitAttention, self).__init__()

        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob

        # Grouped conv producing `radix` channel splits.
        self.conv = nn.Conv2d(in_channels, channels * radix, kernel_size, stride,
                              padding, dilation, groups=groups * radix, bias=bias)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels * radix)

        self.relu = nn.ReLU(inplace=True)

        # Global-context attention head: channels -> inter -> channels * radix.
        self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = nn.Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)

        self.rsoftmax = RadixSoftmax(radix, groups)

    def forward(self, x):
        """Apply the split-attention conv; returns (B, channels, H', W')."""
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        # BUGFIX: the original called self.dropblock(x) here when
        # dropblock_prob > 0, but no dropblock module exists — it always
        # crashed. DropBlock is intentionally skipped (not implemented).
        x = self.relu(x)

        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # Split channels into `radix` groups; their sum forms the context.
            splited = torch.split(x, rchannel // self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x

        # Squeeze: global average pooling + two-layer attention MLP.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            # Excite: weight each split by its attention map and sum.
            attens = torch.split(atten, rchannel // self.radix, dim=1)
            out = sum([att * split for (att, split) in zip(attens, splited)])
        else:
            out = atten * x
        return out.contiguous()


class ResNeStBottleneck(nn.Module):
    """ResNeSt bottleneck block: 1x1 conv -> 3x3 stage -> 1x1 conv + residual.

    When ``radix > 0`` the 3x3 stage is a SplitAttention module (which applies
    its own normalization and activation); when ``radix == 0`` it is a plain
    grouped 3x3 conv followed by BN + ReLU. ``avd``/``avd_first``/``is_first``
    are accepted for interface compatibility but unused in this implementation.
    """
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(ResNeStBottleneck, self).__init__()

        # Width of the middle (grouped) convolution.
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)

        # Whether the 3x3 stage is split-attention or a plain conv.
        self.use_splat = radix > 0
        if self.use_splat:
            self.conv2 = SplitAttention(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation, groups=cardinality,
                bias=False, radix=radix, reduction_factor=4,
                rectify=rectified_conv, rectify_avg=rectify_avg,
                norm_layer=norm_layer, dropblock_prob=dropblock_prob)
        else:
            self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride,
                                   padding=dilation, dilation=dilation, groups=cardinality,
                                   bias=False)
            self.bn2 = norm_layer(group_width)

        self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)

        if last_gamma:
            # Zero-init the last BN gamma so the block starts as identity.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def forward(self, x):
        """Residual forward pass; returns (B, planes * 4, H', W')."""
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        if not self.use_splat:
            # BUGFIX: the original gated this on hasattr(self, 'conv2'), which
            # is always true, so the plain-conv (radix == 0) path never got its
            # BN + ReLU. SplitAttention normalizes internally; the plain conv
            # needs bn2 + ReLU applied here.
            out = self.bn2(out)
            out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNeSt(nn.Module):
    """ResNeSt backbone: a ResNet whose bottleneck blocks use Split Attention.

    ``block``/``layers`` define the stage layout (e.g. ResNeStBottleneck with
    [3, 4, 6, 3] for the 50-layer variant). ``radix``, ``groups`` and
    ``bottleneck_width`` are forwarded to every block; ``dilated`` enables
    dilation in the last two stages. The forward pass deliberately skips
    ``self.maxpool`` so 32x32 CIFAR inputs keep a usable feature-map size.
    """
    def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64,
                 num_classes=1000, dilated=False, dilation=1,
                 deep_stem=False, stem_width=64, avg_down=False,
                 rectified_conv=False, rectify_avg=False,
                 avd=False, avd_first=False,
                 final_drop=0.0, dropblock_prob=0,
                 last_gamma=False, norm_layer=None):
        # Block-construction settings consumed later by _make_layer.
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        self.inplanes = stem_width * 2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first

        super(ResNeSt, self).__init__()

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        # Deep stem (three stacked 3x3 convs) or a standard single-conv stem.
        # NOTE(review): the deep stem's first conv uses stride=2, which halves
        # 32x32 CIFAR inputs despite the CIFAR-oriented comment in the else
        # branch — confirm this is intended for the CIFAR runs.
        if deep_stem:
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(inplace=True),
                nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(inplace=True),
                nn.Conv2d(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            # Standard stem adapted for CIFAR-100 (3x3, stride 1).
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)

        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # ResNeSt stages.
        self.layer1 = self._make_layer(block, 64, layers[0], is_first=False)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilation=1 if not dilated else 2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilation=1 if not dilated else 4)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style initialization for convolutions; BN weights 1, biases 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, norm_layer):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, is_first=True):
        """Build one stage of `blocks` blocks; only the first strides/downsamples."""
        norm_layer = self._norm_layer
        downsample = None
        # Projection shortcut when spatial size or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                # Average-pool before the 1x1 conv (ResNet-D style shortcut).
                if dilation == 1:
                    down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                    ceil_mode=True, count_include_pad=False))
                else:
                    down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                    ceil_mode=True, count_include_pad=False))
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=1, bias=False))
            else:
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=stride, bias=False))
            down_layers.append(norm_layer(planes * block.expansion))
            downsample = nn.Sequential(*down_layers)

        layers = []
        # First block carries the stride and (possibly) the downsample shortcut.
        if dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=1, is_first=is_first,
                                norm_layer=norm_layer, last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=2, is_first=is_first,
                                norm_layer=norm_layer, last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        # Remaining blocks keep stride 1 and the stage's dilation.
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=dilation,
                                norm_layer=norm_layer, last_gamma=self.last_gamma))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # For CIFAR-100 the max-pool is skipped to preserve feature-map size.
        # x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        if self.drop:
            x = self.drop(x)
        x = self.fc(x)

        return x


def resnest50(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-50 model (radix=2, cardinality=1, deep stem).

    ``pretrained`` and ``root`` are accepted for interface compatibility;
    no pretrained weights are loaded by this implementation.
    """
    return ResNeSt(
        ResNeStBottleneck, [3, 4, 6, 3],
        radix=2, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=32, avg_down=True,
        avd=True, avd_first=False, **kwargs)


def resnest101(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-101 model (radix=2, cardinality=1, deep stem).

    ``pretrained`` and ``root`` are accepted for interface compatibility;
    no pretrained weights are loaded by this implementation.
    """
    return ResNeSt(
        ResNeStBottleneck, [3, 4, 23, 3],
        radix=2, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=64, avg_down=True,
        avd=True, avd_first=False, **kwargs)


def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-200 model (radix=2, cardinality=1, deep stem).

    ``pretrained`` and ``root`` are accepted for interface compatibility;
    no pretrained weights are loaded by this implementation.
    """
    return ResNeSt(
        ResNeStBottleneck, [3, 24, 36, 3],
        radix=2, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=64, avg_down=True,
        avd=True, avd_first=False, **kwargs)


def resnest269(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-269 model (radix=2, cardinality=1, deep stem).

    ``pretrained`` and ``root`` are accepted for interface compatibility;
    no pretrained weights are loaded by this implementation.
    """
    return ResNeSt(
        ResNeStBottleneck, [3, 30, 48, 8],
        radix=2, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=64, avg_down=True,
        avd=True, avd_first=False, **kwargs)


# CIFAR-100 data loading and preprocessing
def get_cifar100_loaders(batch_size=128, num_workers=2, data_root='./data'):
    """Build CIFAR-100 train/test DataLoaders with standard augmentation.

    Args:
        batch_size: batch size for both loaders.
        num_workers: worker processes per DataLoader (previously hard-coded to 2).
        data_root: dataset download/cache directory (previously hard-coded to './data').

    Returns:
        (trainloader, testloader) tuple. The training set is downloaded to
        *data_root* on first use.
    """
    # Per-channel CIFAR-100 mean/std used for both splits.
    normalize = transforms.Normalize((0.5071, 0.4867, 0.4408),
                                     (0.2675, 0.2565, 0.2761))

    # Training-time augmentation: crop, flip, rotation and color jitter.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        transforms.ToTensor(),
        normalize,
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainset = torchvision.datasets.CIFAR100(root=data_root, train=True,
                                             download=True, transform=transform_train)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             num_workers=num_workers)

    testset = torchvision.datasets.CIFAR100(root=data_root, train=False,
                                            download=True, transform=transform_test)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers)

    return trainloader, testloader


def plot_training_history(train_losses, train_accs, test_losses, test_accs, save_path='./data'):
    """Plot and save training/test loss and accuracy curves under *save_path*.

    Writes two PNGs: a 2x2 grid of the four individual curves
    ('training_history_separate.png') and a side-by-side train-vs-test
    comparison ('training_history_comparison.png').
    """
    # Make sure the output directory exists.
    os.makedirs(save_path, exist_ok=True)

    plt.style.use('default')
    epochs = range(1, len(train_losses) + 1)

    # --- Figure 1: each metric in its own panel (2x2 grid) ---
    panels = [
        (train_losses, 'b-', 'Train Loss', 'Training Loss', 'Loss'),
        (test_losses, 'r-', 'Test Loss', 'Test Loss', 'Loss'),
        (train_accs, 'b-', 'Train Acc', 'Training Accuracy', 'Accuracy (%)'),
        (test_accs, 'r-', 'Test Acc', 'Test Accuracy', 'Accuracy (%)'),
    ]
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    for ax, (series, fmt, label, title, ylabel) in zip(axes.flat, panels):
        ax.plot(epochs, series, fmt, linewidth=2, label=label)
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.grid(True, alpha=0.3)
        ax.legend()

    plt.tight_layout()
    plt.savefig(os.path.join(save_path, 'training_history_separate.png'), dpi=300, bbox_inches='tight')
    plt.close()

    # --- Figure 2: train vs. test comparison (loss | accuracy) ---
    comparisons = [
        (train_losses, test_losses, 'Train Loss', 'Test Loss', 'Loss Comparison', 'Loss'),
        (train_accs, test_accs, 'Train Acc', 'Test Acc', 'Accuracy Comparison', 'Accuracy (%)'),
    ]
    fig, axes = plt.subplots(1, 2, figsize=(15, 6))
    for ax, (train_series, test_series, train_lab, test_lab, title, ylabel) in zip(axes, comparisons):
        ax.plot(epochs, train_series, 'b-', linewidth=2, label=train_lab)
        ax.plot(epochs, test_series, 'r-', linewidth=2, label=test_lab)
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.grid(True, alpha=0.3)
        ax.legend()

    plt.tight_layout()
    plt.savefig(os.path.join(save_path, 'training_history_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()

    print(f"Training history plots saved to {save_path}")


# Training function
def train_model(model, trainloader, testloader, num_epochs=50, device='cuda'):
    """Train *model* with SGD + StepLR and evaluate after every epoch.

    Uses cross-entropy loss, SGD (lr=0.1, momentum=0.9, weight_decay=1e-4)
    and a StepLR schedule (x0.2 every 30 epochs). The best-accuracy weights
    are checkpointed to 'resnest_cifar100_best.pth', and the per-epoch curves
    are plotted via plot_training_history.

    Returns:
        (train_losses, train_accs, test_losses, test_accs) — one entry per epoch.
    """
    model.to(device)
    criterion = nn.CrossEntropyLoss()

    # Initial learning rate
    initial_lr = 0.1

    # Optimizer used for the actual training run
    optimizer = optim.SGD(model.parameters(), lr=initial_lr, momentum=0.9, weight_decay=1e-4)
    scheduler = StepLR(optimizer, step_size=30, gamma=0.2)

    best_acc = 0.0
    train_losses = []
    train_accs = []
    test_losses = []
    test_accs = []

    print(f"Starting training for {num_epochs} epochs on {device}")

    import time
    start_time = time.time()

    for epoch in range(num_epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        epoch_loss = 0.0
        correct = 0
        total = 0

        for i, (inputs, labels) in enumerate(trainloader):
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            epoch_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()

            if i % 100 == 99:
                # NOTE(review): current_lr is fetched but never printed here.
                current_lr = optimizer.param_groups[0]['lr']
                print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(trainloader)}], '
                      f'Loss: {running_loss / 100:.4f}, Acc: {100. * correct / total:.2f}%')
                running_loss = 0.0

        # Record training metrics for this epoch
        train_acc = 100. * correct / total
        train_loss = epoch_loss / len(trainloader)
        train_accs.append(train_acc)
        train_losses.append(train_loss)

        # Evaluation phase
        model.eval()
        test_correct = 0
        test_total = 0
        test_loss = 0.0

        with torch.no_grad():
            for inputs, labels in testloader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                test_loss += loss.item()

                _, predicted = outputs.max(1)
                test_total += labels.size(0)
                test_correct += predicted.eq(labels).sum().item()

        test_acc = 100. * test_correct / test_total
        test_loss = test_loss / len(testloader)
        test_accs.append(test_acc)
        test_losses.append(test_loss)

        print(f'Epoch [{epoch + 1}/{num_epochs}] - Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, '
              f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%')

        # Checkpoint the best-performing weights
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(), 'resnest_cifar100_best.pth')
            print(f'New best model saved with accuracy: {best_acc:.2f}%')

        scheduler.step()

    training_time = time.time() - start_time
    print(f'Training completed! Best test accuracy: {best_acc:.2f}%')
    print(f'Total training time: {training_time / 3600:.2f} hours')

    # Plot and save the training history
    plot_training_history(train_losses, train_accs, test_losses, test_accs)

    return train_losses, train_accs, test_losses, test_accs


# Sanity-check the model architecture
def test_model_architecture():
    """Run one forward pass through ResNeSt-50 to verify it works end to end.

    Returns True if a (2, 3, 32, 32) input passes through the model without
    raising, False otherwise.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = resnest50(num_classes=100).to(device)

    # Dummy CIFAR-sized batch.
    dummy_input = torch.randn(2, 3, 32, 32).to(device)

    print(f"Model parameters: {sum(p.numel() for p in model.parameters()):,}")

    try:
        with torch.no_grad():
            output = model(dummy_input)
    except Exception as e:
        print(f"Test failed: {e}")
        return False
    print(f"Test passed! Output shape: {output.shape}")
    return True


# Main entry point
def main():
    """Sanity-check the architecture, train ResNeSt-50 on CIFAR-100, and
    persist the per-epoch training history to 'training_history.pth'."""
    # Quick forward-pass check before committing to a long training run.
    print("Testing model architecture...")
    if not test_model_architecture():
        print("Model architecture test failed. Please check the implementation.")
        return

    # Device selection.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    # Build the model.
    model = resnest50(num_classes=100)
    print(f'Model created with {sum(p.numel() for p in model.parameters()):,} parameters')

    # Data loaders.
    trainloader, testloader = get_cifar100_loaders(batch_size=128)
    print('Data loaders created')

    # Train and collect per-epoch metrics.
    history = train_model(model, trainloader, testloader, num_epochs=50, device=device)
    train_losses, train_accs, test_losses, test_accs = history

    # Persist the curves for later analysis.
    torch.save({
        'train_losses': train_losses,
        'train_accs': train_accs,
        'test_losses': test_losses,
        'test_accs': test_accs
    }, 'training_history.pth')

    print("Training history saved to training_history.pth")


def run_ablation_experiments():
    """Ablation sweep over (radix, cardinality) combinations on CIFAR-100.

    Trains a ResNeSt-50-shaped model for each valid pair, records the best
    accuracies / lowest losses and parameter counts, and incrementally writes
    the results to 'ablation_results.json' after every experiment.
    NOTE(review): each configuration trains for 50 epochs, so a full sweep is
    very expensive.

    Returns:
        dict with keys 'configs', 'params', 'train_acc', 'test_acc',
        'train_loss', 'test_loss' (parallel lists, one entry per config).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    # Experiment grid
    radix_values = [0, 1, 2, 4]  # 0 means a plain ResNet block (no split attention)
    cardinality_values = [1, 2]  # cardinality (number of groups)
    num_epochs = 50
    batch_size = 128

    # Result storage (parallel lists keyed by metric)
    results = {
        'configs': [],
        'params': [],
        'train_acc': [],
        'test_acc': [],
        'train_loss': [],
        'test_loss': []
    }

    # Load data once (all experiments share the same loaders)
    trainloader, testloader = get_cifar100_loaders(batch_size=batch_size)

    for radix, cardinality in product(radix_values, cardinality_values):
        # Skip invalid combinations (cardinality must be 1 when radix == 0)
        if radix == 0 and cardinality != 1:
            continue

        print(f"\n{'=' * 50}")
        print(f"Starting experiment with radix={radix}, cardinality={cardinality}")
        print(f"{'=' * 50}")

        # Build the model for this configuration
        model = ResNeSt(ResNeStBottleneck, [3, 4, 6, 3],
                        radix=radix, groups=cardinality, bottleneck_width=64,
                        deep_stem=True, stem_width=32, avg_down=True,
                        avd=True, num_classes=100)

        # Parameter count
        total_params = sum(p.numel() for p in model.parameters())
        print(f"Model parameters: {total_params:,}")

        # Train (returns per-epoch loss/accuracy histories)
        train_loss, train_acc, test_loss, test_acc = train_model(
            model, trainloader, testloader,
            num_epochs=num_epochs, device=device
        )

        # Record best accuracy / lowest loss for this configuration
        config_name = f"R{radix}_C{cardinality}"
        results['configs'].append(config_name)
        results['params'].append(total_params)
        results['train_acc'].append(max(train_acc))
        results['test_acc'].append(max(test_acc))
        results['train_loss'].append(min(train_loss))
        results['test_loss'].append(min(test_loss))

        # Persist partial results after every experiment
        with open('ablation_results.json', 'w') as f:
            json.dump(results, f, indent=4)

    return results


def plot_ablation_results(results, save_dir='./data/ablation'):
    """Visualize ablation results as three PNGs under *save_dir*:
    an accuracy bar chart, a parameters-vs-accuracy scatter with trend line,
    and a combined bars-plus-line comparison on twin axes."""
    os.makedirs(save_dir, exist_ok=True)

    configs = results['configs']
    params = np.array(results['params']) / 1e6  # convert to millions of parameters
    test_acc = results['test_acc']

    # One distinct color per configuration
    colors = plt.cm.viridis(np.linspace(0, 1, len(configs)))

    # 1. Accuracy comparison bar chart
    plt.figure(figsize=(12, 6))
    bars = plt.bar(configs, test_acc, color=colors)
    plt.title('Test Accuracy by Configuration', fontsize=14, fontweight='bold')
    plt.xlabel('Configuration (Radix_Cardinality)')
    plt.ylabel('Accuracy (%)')
    # NOTE(review): a fixed 70-85% range clips any configuration outside it.
    plt.ylim(70, 85)

    # Annotate each bar with its value
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., height,
                 f'{height:.2f}%',
                 ha='center', va='bottom')

    plt.grid(True, axis='y', alpha=0.3)
    plt.savefig(os.path.join(save_dir, 'accuracy_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()

    # 2. Parameter count vs. accuracy scatter plot
    plt.figure(figsize=(10, 6))
    for i, (config, param, acc) in enumerate(zip(configs, params, test_acc)):
        plt.scatter(param, acc, color=colors[i], s=150, label=config)

    plt.title('Parameters vs. Accuracy', fontsize=14, fontweight='bold')
    plt.xlabel('Parameters (Millions)')
    plt.ylabel('Test Accuracy (%)')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Linear trend line (NOTE(review): np.polyfit needs at least two points)
    z = np.polyfit(params, test_acc, 1)
    p = np.poly1d(z)
    plt.plot(params, p(params), "r--")

    plt.savefig(os.path.join(save_dir, 'params_vs_accuracy.png'), dpi=300, bbox_inches='tight')
    plt.close()

    # 3. Combined comparison: parameters (bars) + accuracy (line) on twin axes
    fig, ax1 = plt.subplots(figsize=(12, 6))

    # Parameter counts as bars on the left axis
    ax1.bar(configs, params, color='skyblue', alpha=0.6, label='Parameters (M)')
    ax1.set_xlabel('Configuration (Radix_Cardinality)')
    ax1.set_ylabel('Parameters (Millions)')
    ax1.tick_params(axis='y')

    # Accuracy as a red line on the right axis
    ax2 = ax1.twinx()
    ax2.plot(configs, test_acc, 'r-o', linewidth=2, markersize=8, label='Test Accuracy (%)')
    ax2.set_ylabel('Accuracy (%)')
    ax2.tick_params(axis='y', colors='red')
    ax2.spines['right'].set_color('red')

    plt.title('Model Configuration Comparison', fontsize=14, fontweight='bold')
    fig.legend(loc='upper right', bbox_to_anchor=(0.9, 0.9))
    plt.grid(True, axis='x', alpha=0.3)

    plt.savefig(os.path.join(save_dir, 'comprehensive_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()

    print(f"Visualizations saved to {save_dir}")


def ablation_study():
    """Run (or load cached) ablation experiments, print a summary table, and
    visualize the results."""
    # Reuse cached results when available; otherwise run the full sweep.
    if os.path.exists('ablation_results.json'):
        print("Loading existing ablation results...")
        with open('ablation_results.json', 'r') as f:
            results = json.load(f)
    else:
        print("Running new ablation experiments...")
        results = run_ablation_experiments()

    # Summary table.
    print("\nAblation Study Results Summary:")
    print("{:<15} {:<15} {:<15} {:<15}".format(
        "Config", "Params(M)", "Train Acc(%)", "Test Acc(%)"))
    rows = zip(results['configs'],
               np.array(results['params']) / 1e6,
               results['train_acc'],
               results['test_acc'])
    for config, param, train_acc, test_acc in rows:
        print("{:<15} {:<15.2f} {:<15.2f} {:<15.2f}".format(
            config, param, train_acc, test_acc))

    # Plots.
    plot_ablation_results(results)

if __name__ == '__main__':
    # To run the ablation study instead, uncomment the next line.
    #ablation_study()
    main()