import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
import numpy as np
from torch.utils.data import DataLoader, TensorDataset

# 1. Quantization helper functions
def quantize_tensor(tensor, scale, zero_point=0):
    """Return an int8 per-tensor quantized copy of `tensor`.

    A scale of exactly zero is invalid for `torch.quantize_per_tensor`
    (division by zero), so the input tensor is passed through untouched
    in that case.
    """
    if scale != 0:
        return torch.quantize_per_tensor(tensor, scale, zero_point, torch.qint8)
    return tensor

def dequantize_tensor(q_tensor):
    """Map a quantized tensor back to its float representation."""
    return torch.dequantize(q_tensor)

# 2. Quantizer class
class PTQQuantizer:
    """Post-training quantizer with symmetric int8 (Q/DQ simulation).

    Activation scales are collected through forward hooks during
    calibration; weight scales are computed directly from the stored
    parameters. `quantize_model_forward` then attaches a fake-quantized
    forward pass (`model.quant_forward`) that inserts quantize/dequantize
    pairs around every Conv2d/Linear layer.
    """

    def __init__(self, num_bits=8):
        self.num_bits = num_bits
        self.activation_scales = {}  # layer name -> activation scale
        self.weight_scales = {}      # layer name -> weight scale
        self.zero_point = 0          # symmetric quantization uses zero point 0
        self.hooks = []

    def _scale_from_max(self, max_val):
        """Map an absolute-max value to a symmetric scale (1.0 when max is 0)."""
        qmax = 2 ** (self.num_bits - 1) - 1
        return max_val / qmax if max_val > 0 else 1.0

    def _save_activation_scale(self, name, output):
        """Forward-hook callback: update the activation scale for `name`.

        Fix: keep the running maximum over ALL calibration batches. The
        previous version overwrote the scale on every batch, so only the
        last batch seen determined the activation range.
        """
        scale = self._scale_from_max(torch.max(torch.abs(output)).item())
        prev = self.activation_scales.get(name)
        if prev is not None and prev > scale:
            scale = prev
        self.activation_scales[name] = scale
        print(f"Captured activation scale for {name}: {scale}")

    def _calculate_weight_scales(self, model):
        """Compute symmetric scales for every Conv2d/Linear weight tensor."""
        for name, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                max_val = torch.max(torch.abs(module.weight.data)).item()
                scale = self._scale_from_max(max_val)
                self.weight_scales[name] = scale
                print(f"Calculated weight scale for {name}: {scale}")

    def register_hooks(self, model):
        """Register forward hooks on all Conv2d/Linear layers to capture
        their output activations during calibration."""
        for name, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                # Bind `name` as a default argument so each hook keeps its
                # own layer name (avoids the late-binding closure pitfall).
                hook = module.register_forward_hook(
                    lambda m, inp, out, name=name: self._save_activation_scale(
                        name, out
                    )
                )
                self.hooks.append(hook)

    def remove_hooks(self):
        """Remove all registered hooks."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []

    def quantize_model_forward(self, model):
        """Attach a fake-quantized forward pass (with Q/DQ pairs) to `model`.

        Computes weight scales, then builds a closure that walks the model's
        direct children in definition order, fake-quantizing each
        Conv2d/Linear weight and output activation.

        NOTE(review): this assumes the model's real forward() runs its
        children strictly in definition order — confirm for each model used.
        """
        self._calculate_weight_scales(model)
        input_scale = self.activation_scales.get("input", 1.0)

        def quantized_forward(x):
            # Q/DQ on the network input.
            x = dequantize_tensor(quantize_tensor(x, input_scale, self.zero_point))

            for name, module in model.named_children():
                if isinstance(module, (nn.Conv2d, nn.Linear)):
                    # Fake-quantize the weight (Q then immediately DQ).
                    weight_scale = self.weight_scales.get(name, 1.0)
                    dq_weight = dequantize_tensor(
                        quantize_tensor(module.weight.data, weight_scale, self.zero_point)
                    )

                    # Temporarily swap in the dequantized weight; restore in
                    # a finally block so an exception cannot leave the model
                    # with corrupted weights.
                    original_weight = module.weight.data.clone()
                    module.weight.data = dq_weight
                    try:
                        x = module(x)
                    finally:
                        module.weight.data = original_weight

                    # Fake-quantize the layer's output activation when a
                    # calibrated scale exists for it.
                    if name in self.activation_scales:
                        act_scale = self.activation_scales[name]
                        x = dequantize_tensor(quantize_tensor(x, act_scale, self.zero_point))
                else:
                    # Flatten / ReLU / pooling etc. pass through unchanged.
                    x = module(x)
            return x

        # Expose the quantized forward alongside the original one.
        model.quant_forward = quantized_forward
        return model

# 3. Calibration function
def calibrate_model(model, calib_loader, num_bits=8):
    """
    Calibrate the model: run calibration data to collect activation ranges,
    then attach a fake-quantized forward pass (`model.quant_forward`).

    :param model: model to quantize
    :param calib_loader: iterable of calibration batches; each item may be a
        bare input tensor or a (inputs, ...) tuple/list as produced by a
        torch DataLoader over a TensorDataset
    :param num_bits: quantization bit width
    :return: the calibrated model
    """
    quantizer = PTQQuantizer(num_bits)

    # Register hooks to collect activation statistics.
    quantizer.register_hooks(model)

    model.eval()
    qmax = 2 ** (num_bits - 1) - 1  # loop-invariant, hoisted
    with torch.no_grad():
        for i, data in enumerate(calib_loader):
            # Real DataLoader batches are usually (inputs, targets) tuples;
            # unwrap them so max-abs works on the input tensor itself.
            if isinstance(data, (list, tuple)):
                data = data[0]
            # Input scale is derived from the first batch only.
            if "input" not in quantizer.activation_scales:
                max_val = torch.max(torch.abs(data)).item()
                quantizer.activation_scales["input"] = max_val / qmax if max_val > 0 else 1.0
                print(f"Input scale: {quantizer.activation_scales['input']}")
            model(data)
            if i >= 10:  # cap the number of calibration batches
                break

    # Remove hooks before normal use.
    quantizer.remove_hooks()

    # Attach the fake-quantized forward pass.
    quantizer.quantize_model_forward(model)
    return model

# 4. Random calibration data
def create_random_dataloader(batch_size=32, num_batches=10, input_shape=(3, 32, 32)):
    """Build random input batches for calibration.

    Despite the name, this returns a plain list of tensors — each of shape
    (batch_size, *input_shape) — rather than a torch DataLoader; the
    calibration loop only needs an iterable of input batches.
    """
    return [torch.randn(batch_size, *input_shape) for _ in range(num_batches)]

# 5. Model definition
class SimpleCNN(nn.Module):
    """Small CNN for 3x32x32 inputs producing 10 logits.

    The attribute names (conv1, conv2, fc, ...) are significant: the PTQ
    code keys its scale tables on these names via `named_modules`.
    """

    def __init__(self):
        super().__init__()
        # 3x32x32 -> 16x32x32
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.relu1 = nn.ReLU()
        # 16x32x32 -> 16x16x16
        self.pool1 = nn.MaxPool2d(2, 2)
        # 16x16x16 -> 32x16x16
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.relu2 = nn.ReLU()
        # 32x16x16 -> 32x8x8
        self.pool2 = nn.MaxPool2d(2, 2)
        self.flatten = nn.Flatten()
        # 32 * 8 * 8 = 2048 features into 10 classes
        self.fc = nn.Linear(32 * 8 * 8, 10)

    def forward(self, x):
        out = self.conv1(x)
        out = self.pool1(self.relu1(out))
        out = self.conv2(out)
        out = self.pool2(self.relu2(out))
        out = self.flatten(out)
        return self.fc(out)

# 6. ONNX export (with Q/DQ operators)
def export_quantized_onnx(model, output_path, quantizer, input_shape=(1, 3, 32, 32)):
    """
    Export an ONNX model containing Q/DQ (quantize/dequantize) operators.

    :param model: calibrated model; its direct-child names must match the
        keys in `quantizer`'s scale tables
    :param output_path: output file path for the ONNX model
    :param quantizer: quantizer holding the activation and weight scales
    :param input_shape: shape of the dummy tensor used for tracing
    """
    # Wrapper that manually inserts Q/DQ pairs around each layer so the
    # traced graph carries the quantization nodes.
    # NOTE(review): torch.quantize_per_tensor is expected to trace to ONNX
    # QuantizeLinear/DequantizeLinear at this opset — confirm against the
    # installed torch version before relying on the exported graph.
    class QuantizedModel(nn.Module):
        def __init__(self, original_model, quantizer):
            super().__init__()
            self.model = original_model
            self.quantizer = quantizer
            
        def forward(self, x):
            # Q/DQ on the network input; scale falls back to 1.0 if
            # calibration never recorded an "input" entry.
            input_scale = self.quantizer.activation_scales.get("input", 1.0)
            x = torch.quantize_per_tensor(x, input_scale, 0, torch.qint8)
            x = x.dequantize()
            
            # Walk direct children in definition order.
            # NOTE(review): assumes the model's real forward() is a strictly
            # sequential pass over its children — verify per model.
            for name, module in self.model.named_children():
                if isinstance(module, (nn.Conv2d, nn.Linear)):
                    # Per-layer scales; 1.0 fallback for uncalibrated layers.
                    weight_scale = self.quantizer.weight_scales.get(name, 1.0)
                    act_scale = self.quantizer.activation_scales.get(name, 1.0)
                    
                    # Fake-quantize the weight (Q then immediately DQ).
                    weight = module.weight
                    weight = torch.quantize_per_tensor(weight, weight_scale, 0, torch.qint8)
                    weight = weight.dequantize()
                    
                    # Run the op through the functional API so the traced
                    # graph uses the dequantized weight tensor above.
                    if isinstance(module, nn.Conv2d):
                        x = F.conv2d(
                            x, weight, module.bias, 
                            module.stride, module.padding, 
                            module.dilation, module.groups
                        )
                    else:
                        x = F.linear(x, weight, module.bias)
                    
                    # Fake-quantize the layer's output activation.
                    x = torch.quantize_per_tensor(x, act_scale, 0, torch.qint8)
                    x = x.dequantize()
                elif isinstance(module, nn.Flatten):
                    x = module(x)
                else:
                    # ReLU / pooling / other layers pass through unquantized.
                    x = module(x)
            return x

    # Instantiate the Q/DQ wrapper in eval mode for tracing.
    quantized_model = QuantizedModel(model, quantizer)
    quantized_model.eval()
    
    # Dummy input used to drive the ONNX trace.
    dummy_input = torch.randn(*input_shape)
    
    torch.onnx.export(
        quantized_model,
        dummy_input,
        output_path,
        opset_version=13,  # opset that supports the quantization operators
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
        do_constant_folding=False,  # keep Q/DQ pairs from being folded away
        verbose=True,
    )
    print(f"Quantized model with Q/DQ nodes exported to {output_path}")

# 7. Main entry point
def main():
    """End-to-end demo: build, PTQ-calibrate, and export a Q/DQ ONNX model."""
    num_bits = 8  # single source of truth for the bit width

    model = SimpleCNN()
    print("Model structure:")
    print(model)

    # Random calibration data: 10 batches of 10 samples (100 samples total).
    calib_loader = create_random_dataloader(
        batch_size=10, num_batches=10, input_shape=(3, 32, 32)
    )

    # Collect activation statistics via forward hooks.
    print("Calibrating model...")
    quantizer = PTQQuantizer(num_bits)
    quantizer.register_hooks(model)

    model.eval()
    # Derive qmax from num_bits instead of hard-coding 2 ** 7 - 1.
    qmax = 2 ** (num_bits - 1) - 1
    with torch.no_grad():
        for i, data in enumerate(calib_loader):
            # Input scale is derived from the first batch only.
            if "input" not in quantizer.activation_scales:
                max_val = torch.max(torch.abs(data)).item()
                quantizer.activation_scales["input"] = max_val / qmax if max_val > 0 else 1.0
                print(f"Input scale: {quantizer.activation_scales['input']}")
            model(data)
            if i >= 10:  # cap the number of calibration batches
                break

    quantizer.remove_hooks()
    # The exporter's Q/DQ wrapper looks up weight scales by layer name.
    quantizer._calculate_weight_scales(model)

    # Export to ONNX with Q/DQ operators in the graph.
    print("Exporting to ONNX with Q/DQ nodes...")
    export_quantized_onnx(
        model, 
        "quantized_model.onnx", 
        quantizer,
        input_shape=(1, 3, 32, 32)
    )

# Script entry point: run the full calibrate-and-export demo.
if __name__ == "__main__":
    main()