import torch
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub, prepare_qat, convert
import os

class QuantizableRRDBNet(nn.Module):
    """Quantization-ready Real-ESRGAN-style super-resolution network.

    Wraps the float network with ``QuantStub``/``DeQuantStub`` so it can be
    used with PyTorch eager-mode static / QAT quantization.  Upscales the
    input by a fixed factor of 4 (two x2 nearest-neighbour stages).

    Args:
        num_in_ch: number of input image channels.
        num_out_ch: number of output image channels.
        num_feat: base feature width of the trunk.
        num_block: number of dense residual blocks in the trunk.
        num_grow_ch: channels added by each residual block.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=32, num_block=8, num_grow_ch=16):
        super().__init__()

        # Quantize/dequantize stubs mark the int8 region of the graph.
        # They behave as identity until the model is prepared/converted.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        self.num_feat = num_feat
        self.num_block = num_block

        # Shallow feature extraction.
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)

        # Dense residual trunk: each block widens the feature map by
        # num_grow_ch channels, so block i consumes num_feat + i * num_grow_ch.
        self.body = nn.ModuleList()
        for i in range(num_block):
            self.body.append(
                QuantizableResidualBlock(num_feat + i * num_grow_ch, num_grow_ch)
            )

        # Fuse the grown feature stack back down to num_feat channels.
        self.conv_body = nn.Conv2d(num_feat + num_block * num_grow_ch, num_feat, 3, 1, 1)

        # Upsampling / reconstruction head.
        self.upconv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.upconv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        # Quantize the input (no-op on the unprepared float model).
        x = self.quant(x)

        feat = self.lrelu(self.conv_first(x))

        for block in self.body:
            feat = block(feat)

        feat = self.lrelu(self.conv_body(feat))

        # Two x2 nearest-neighbour upsampling stages (total x4).
        # BUGFIX: the original used the undefined name `F`; the file never
        # imports torch.nn.functional as F, so forward() raised NameError.
        feat = self.lrelu(self.upconv1(nn.functional.interpolate(feat, scale_factor=2, mode='nearest')))
        feat = self.lrelu(self.upconv2(nn.functional.interpolate(feat, scale_factor=2, mode='nearest')))

        out = self.conv_last(self.lrelu(self.conv_hr(feat)))

        # Dequantize the output back to float.
        out = self.dequant(out)
        return out

    def fuse_model(self):
        """Delegate conv/activation fusion to each residual block."""
        for name, module in self.named_children():
            if name == 'body':
                for block in module:
                    if hasattr(block, 'fuse_model'):
                        block.fuse_model()

class QuantizableResidualBlock(nn.Module):
    """Quantization-friendly dense residual block.

    Maps ``in_channels`` input channels to ``in_channels + grow_channels``
    output channels; the input is added back onto the matching leading
    channels of the output as the skip connection.

    Args:
        in_channels: channels of the incoming feature map.
        grow_channels: extra channels produced by this block.
    """

    def __init__(self, in_channels, grow_channels):
        super().__init__()
        out_channels = in_channels + grow_channels

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        identity = x
        out = self.lrelu(self.conv1(x))
        out = self.conv2(out)
        # BUGFIX: `out` has in_channels + grow_channels channels while
        # `identity` only has in_channels, so the original `out += identity`
        # raised a shape-mismatch error.  Add the skip onto the leading
        # channels only (conv2's output is a fresh tensor, so the in-place
        # slice add is safe).
        out[:, :identity.size(1)] += identity
        return self.lrelu(out)

    def fuse_model(self):
        """Fuse conv/activation pairs for quantization (intentional no-op).

        BUGFIX: the original called
        ``fuse_modules(self, [['conv1', 'lrelu']])``, but eager-mode fusion
        has no fuser method for (Conv2d, LeakyReLU) — only patterns such as
        Conv+BN and Conv+ReLU are supported — so fuse_model() raised.
        Nothing in this block is fusable, hence the empty body.
        """

class ModelQuantizer:
    """Helper wrapping PyTorch eager-mode quantization workflows.

    Supports quantization-aware training (QAT) preparation, post-training
    static quantization with calibration, and dynamic quantization.
    """

    def __init__(self, qconfig_spec=None):
        # Mapping of submodule name -> qconfig; '' applies to the root model.
        self.qconfig_spec = qconfig_spec or {
            '': torch.quantization.get_default_qat_qconfig('fbgemm')
        }

    def prepare_qat(self, model):
        """Prepare *model* for quantization-aware training.

        Expects *model* to expose a ``fuse_model()`` method.  Returns the
        observer-instrumented copy of the model.

        BUGFIX: the original passed ``self.qconfig_spec`` as the second
        positional argument of ``torch.quantization.prepare_qat``, which is
        the module *mapping*, not a qconfig dict.  Qconfigs must instead be
        attached to the (sub)modules via their ``qconfig`` attribute.
        """
        model.train()  # QAT preparation requires training mode
        model.fuse_model()
        for name, qconfig in self.qconfig_spec.items():
            target = model if name == '' else model.get_submodule(name)
            target.qconfig = qconfig
        prepared_model = prepare_qat(model, inplace=False)
        return prepared_model

    def quantize_static(self, model, calibration_loader, device='cpu', max_calibration_batches=100):
        """Post-training static quantization.

        Args:
            model: float model exposing ``fuse_model()``.
            calibration_loader: iterable yielding ``(lr_imgs, _)`` batches.
            device: device to run calibration on.
            max_calibration_batches: number of batches used for observer
                calibration (was a hard-coded 100; now configurable).

        Returns the converted int8 model.
        """
        model.eval()
        model.fuse_model()

        # Attach qconfig and insert observers.
        model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
        prepared_model = torch.quantization.prepare(model, inplace=False)

        # Calibrate observers on representative data.
        with torch.no_grad():
            for batch_idx, (lr_imgs, _) in enumerate(calibration_loader):
                if batch_idx >= max_calibration_batches:
                    break
                lr_imgs = lr_imgs.to(device)
                _ = prepared_model(lr_imgs)

        # Convert observers/weights to the quantized representation.
        quantized_model = torch.quantization.convert(prepared_model, inplace=False)
        return quantized_model

    def quantize_dynamic(self, model):
        """Dynamic quantization (weights int8, activations quantized at runtime).

        NOTE(review): dynamic quantization only supports a limited module
        set (Linear, LSTM, ...); nn.Conv2d entries in the spec are ignored
        by PyTorch, so conv-dominated models see little size reduction.
        """
        quantized_model = torch.quantization.quantize_dynamic(
            model, {nn.Linear, nn.Conv2d}, dtype=torch.qint8
        )
        return quantized_model

def test_quantization():
    """Demo: build the network, apply dynamic quantization, report sizes."""
    model = QuantizableRRDBNet(num_feat=32, num_block=8)

    def param_bytes(m):
        # Total parameter storage in bytes.
        return sum(p.numel() * p.element_size() for p in m.parameters())

    original_size = param_bytes(model)
    print(f"原始模型大小: {original_size/1024/1024:.2f} MB")

    quantizer = ModelQuantizer()

    # Dynamic quantization pass.
    dynamic_quantized = quantizer.quantize_dynamic(model)
    dynamic_size = param_bytes(dynamic_quantized)
    print(f"动态量化后大小: {dynamic_size/1024/1024:.2f} MB")
    print(f"动态量化压缩比: {original_size/dynamic_size:.2f}x")

    # Static quantization needs calibration data, e.g.:
    # static_quantized = quantizer.quantize_static(model, calibration_loader, device='cpu')

def save_quantized_model(model, model_path):
    """Persist *model*'s state dict to *model_path*.

    Prints the destination and on-disk size, and returns the saved file
    size in megabytes.
    """
    torch.save(model.state_dict(), model_path)

    size_mb = os.path.getsize(model_path) / 1024 / 1024
    print(f"模型保存到: {model_path}")
    print(f"模型文件大小: {size_mb:.2f} MB")

    return size_mb

# Script entry point: run the dynamic-quantization demo.
if __name__ == "__main__":
    test_quantization()