import argparse
import time

import torch
import torch.nn as nn

from yolov5.models.yolo import Model
from yolov5.utils.torch_utils import select_device

class QuantizedYOLOv5(nn.Module):
    """Wrapper that brackets a YOLOv5 model with quant/dequant stubs.

    The stubs mark the float->int8 boundary on input and the int8->float
    boundary on output so PyTorch eager-mode static quantization can insert
    observers and later convert the wrapped model.
    """

    def __init__(self, model):
        super().__init__()
        # Registration order matters for state_dict keys: model, quant, dequant.
        self.model = model
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        # quantize input -> run wrapped model -> dequantize output
        return self.dequant(self.model(self.quant(x)))


def prepare_model_for_quantization(model, device):
    """Attach a default qconfig and insert observers for static quantization.

    NOTE(review): `device` is accepted for interface compatibility but is not
    used here — observer insertion is device-independent.

    Args:
        model: float model to instrument (modified in place).
        device: unused; kept for a uniform call signature.

    Returns:
        The same model object, now carrying observers.
    """
    # 'fbgemm' is the x86 server backend config.
    model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    # prepare(..., inplace=True) mutates and also returns the same module.
    return torch.quantization.prepare(model, inplace=True)


def quantize_model(model, device, calibration_data_loader=None):
    """Optionally calibrate, then convert an observer-instrumented model to int8.

    The model must already have observers inserted (see
    prepare_model_for_quantization). When a calibration loader is supplied,
    its batches are pushed through the model so the observers can record
    activation ranges.

    Args:
        model: prepared (observer-instrumented) float model.
        device: device the calibration batches are moved to.
        calibration_data_loader: optional iterable yielding (images, targets).

    Returns:
        A new quantized model; `model` itself is left un-converted
        (convert is called with inplace=False).
    """
    if calibration_data_loader:
        print("使用校准数据进行校准...")
        # BUG FIX: post-training static quantization calibrates in eval mode
        # (the original switched to train()); no_grad avoids building autograd
        # graphs for every calibration forward pass.
        model.eval()
        with torch.no_grad():
            for batch in calibration_data_loader:
                images, _ = batch
                model(images.to(device))

    # Replace observed float modules with their quantized counterparts.
    quantized_model = torch.quantization.convert(model, inplace=False)
    return quantized_model


def load_and_quantize_model(weights_path, device, calibration_data_loader=None):
    """Load YOLOv5 weights from a checkpoint and return an int8-quantized copy.

    Args:
        weights_path: path to a YOLOv5 checkpoint containing a 'model' entry.
        device: device to build and calibrate the model on.
        calibration_data_loader: optional loader forwarded to quantize_model.

    Returns:
        The converted (quantized) model.
    """
    # Rebuild the architecture from config, then restore the trained weights.
    # NOTE(review): torch.load unpickles the checkpoint — only load trusted files.
    model = Model('models/yolov5s.yaml').to(device)
    checkpoint = torch.load(weights_path, map_location=device)
    model.load_state_dict(checkpoint['model'].float().state_dict())
    model.eval()

    # Wrap with quant/dequant stubs, insert observers, then convert.
    wrapped = QuantizedYOLOv5(model)
    prepared = prepare_model_for_quantization(wrapped, device)
    return quantize_model(prepared, device, calibration_data_loader)


def benchmark_model(model, device, input_shape=(1, 3, 640, 640), iterations=100):
    """Measure average latency and throughput of `model` on `device`.

    Runs 10 warm-up passes, then times `iterations` forward passes with a
    random input of `input_shape`.

    BUG FIX: the original used torch.cuda.Event / torch.cuda.synchronize
    unconditionally, which raises on CPU-only runs (the CLI allows
    --device cpu). CUDA events are now used only when the input actually
    lives on a GPU; otherwise a wall-clock timer is used.

    Args:
        model: module to benchmark (switched to eval mode).
        device: device the input tensor is moved to.
        input_shape: shape of the random input batch.
        iterations: number of timed forward passes.

    Returns:
        Tuple (avg_time_ms, fps).
    """
    model.eval()
    input_tensor = torch.randn(input_shape).to(device)
    on_cuda = input_tensor.is_cuda

    # Warm-up: stabilizes caches / autotuning before timing.
    with torch.no_grad():
        for _ in range(10):
            model(input_tensor)

    if on_cuda:
        # CUDA launches are asynchronous; events plus a final synchronize
        # give accurate GPU-side timing. Synchronize first so warm-up work
        # does not bleed into the measured window.
        torch.cuda.synchronize()
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        start_event.record()
        with torch.no_grad():
            for _ in range(iterations):
                model(input_tensor)
        end_event.record()
        torch.cuda.synchronize()
        avg_time_ms = start_event.elapsed_time(end_event) / iterations
    else:
        start = time.perf_counter()
        with torch.no_grad():
            for _ in range(iterations):
                model(input_tensor)
        avg_time_ms = (time.perf_counter() - start) * 1000.0 / iterations

    fps = 1000 / avg_time_ms

    print(f"平均推理时间: {avg_time_ms:.2f} ms")
    print(f"推理帧率: {fps:.2f} FPS")

    return avg_time_ms, fps

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, required=True, help='输入模型权重路径')
    parser.add_argument('--output', type=str, required=True, help='量化后模型保存路径')
    parser.add_argument('--device', default='', help='cuda设备，例如 0 或 cpu')
    parser.add_argument('--calibrate', action='store_true', help='使用校准数据进行量化')
    args = parser.parse_args()

    device = select_device(args.device)

    # BUG FIX: --calibrate was parsed but silently ignored — no calibration
    # loader is ever constructed in this script. Warn the user instead of
    # pretending calibration happened.
    if args.calibrate:
        print("警告: 本脚本未配置校准数据加载器，--calibrate 将被忽略")

    # Load and quantize the model (no calibration loader available here).
    quantized_model = load_and_quantize_model(args.weights, device)

    # Benchmark the quantized model.
    benchmark_model(quantized_model, device)

    # Persist only the weights; reloading requires re-building and
    # re-converting the quantized architecture first.
    torch.save(quantized_model.state_dict(), args.output)
    print(f"量化后的模型已保存至 {args.output}")