import torch
import torchvision.models as models

# Load a pretrained model for post-training static quantization.
model = models.resnet18(pretrained=True)
model.eval()  # static quantization is applied to an eval-mode model

# Prepare input data (mock).
example_input = torch.randn(1, 3, 224, 224)

# Apply static quantization.
# Fix: torch.quantization.get_default_static_qconfig does not exist; the
# correct API for a static post-training qconfig is get_default_qconfig.
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)
# Fix: observers inserted by prepare() need at least one forward pass to
# record activation ranges before convert(); a real flow would loop over a
# representative calibration dataset here.
with torch.no_grad():
    model(example_input)  # minimal calibration pass with the mock input
torch.quantization.convert(model, inplace=True)
# NOTE(review): eager-mode static quantization expects QuantStub/DeQuantStub
# in the model; torchvision ships a quantization-ready variant as
# torchvision.models.quantization.resnet18 — confirm which is intended.

# Inference with the quantized model.
with torch.no_grad():
    output = model(example_input)
    print(output)


# Apply dynamic quantization.
# Fix: torch.quantization.prepare_dynamic does not exist; the supported
# one-step API is torch.quantization.quantize_dynamic. It must also run on a
# float model — the `model` above was already statically quantized and
# converted in place — so reload a fresh float copy first.
float_model = models.resnet18(pretrained=True)
float_model.eval()
dynamic_model = torch.quantization.quantize_dynamic(
    float_model,            # float model to quantize
    {torch.nn.Linear},      # module types whose weights are quantized
    dtype=torch.qint8,      # quantized weight dtype
)

# Inference with the dynamically quantized model.
with torch.no_grad():
    output = dynamic_model(example_input)
    print(output)


import torch
import torchvision.models as models
import torch.optim as optim

# Load a pretrained model for quantization-aware training (QAT).
model = models.resnet18(pretrained=True)
model.train()  # QAT runs with the model in training mode

# Prepare training data (mock).
train_loader = ...  # assume a training DataLoader exists
# NOTE(review): iterating this Ellipsis placeholder would raise TypeError;
# replace with a real DataLoader before running.

# Define the QAT configuration; prepare_qat inserts fake-quant modules.
model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
torch.quantization.prepare_qat(model, inplace=True)

# Define the optimizer and loss function.
optimizer = optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()

# Fix: num_epochs was referenced below but never defined (NameError).
num_epochs = 1  # simplified example; real QAT fine-tunes for several epochs

# Quantization-aware training loop (simplified example).
for epoch in range(num_epochs):
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

# Convert to a quantized model.
# Fix: switch to eval mode BEFORE convert so observer/fake-quant statistics
# and batch-norm behavior are frozen at conversion time (recommended flow).
model.eval()
torch.quantization.convert(model, inplace=True)

# Inference with the quantized model.
with torch.no_grad():
    output = model(example_input)
    print(output)


