import torch.nn as nn
import torch.quantization
from torchvision import datasets, transforms
from torch.utils.data import DataLoader


class Net(nn.Module):
    """Small CNN for MNIST digits, structured for eager-mode quantization.

    QuantStub/DeQuantStub mark the float->quantized boundaries of the model.
    They are identity ops in an unprepared model, but after
    ``torch.quantization.prepare_qat`` + ``convert`` they quantize the float
    input and dequantize the int8 output. Without them the converted model's
    quantized Conv2d would receive plain float tensors and raise at runtime.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Entry point of the quantized region (no-op until prepared).
        self.quant = torch.quantization.QuantStub()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # 28x28 -> 14x14
        self.fc = nn.Linear(16 * 14 * 14, 10)  # matches pooled 16x14x14 feature map
        # Exit point of the quantized region: int8 -> float.
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool(x)  # halve spatial dims: 28x28 -> 14x14
        # Flatten per sample (robust to any batch size, unlike view(-1, ...)).
        x = x.reshape(x.size(0), -1)
        x = self.fc(x)
        return self.dequant(x)


def test_model(model, transform):
    """Evaluate ``model`` on the MNIST test split and print its accuracy."""
    model.eval()
    dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
    loader = DataLoader(dataset, batch_size=1000, shuffle=False)

    correct, total = 0, 0
    with torch.no_grad():
        for batch, targets in loader:
            logits = model(batch)
            # Predicted class = index of the max logit per row.
            predictions = logits.data.argmax(dim=1)
            correct += (predictions == targets).sum().item()
            total += targets.size(0)

    print(f'准确率: {100 * correct / total:.2f}%')


def do():
    """Run quantization-aware training on MNIST, then convert to int8.

    Accuracy is reported twice: once for the fake-quantized float model
    and once for the truly quantized model produced by ``convert``.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)

    # Build the model and attach the QAT configuration (fake-quant observers).
    model = Net()
    model.train()
    torch.backends.quantized.engine = 'fbgemm'
    model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
    torch.quantization.prepare_qat(model, inplace=True)

    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    criterion = nn.CrossEntropyLoss()

    # Short training loop; observers gather activation statistics as we go.
    for epoch in range(3):
        running_loss = 0.0
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f'Epoch {epoch + 1}, avg batch loss: {running_loss / len(train_loader):.4f}')

    # Accuracy of the fake-quantized (still float) model.
    test_model(model, transform)

    # Freeze and convert to a real int8 model, then measure again.
    model.eval()
    quantized_model = torch.quantization.convert(model)
    test_model(quantized_model, transform)


# Script entry point.
if __name__ == '__main__':
    do()
