"""PyTorch FP16量化（模型压缩+加速）"""
"""
模型量化模块
提供多种模型量化策略，包括4-bit、8-bit和动态量化等
"""
import torch
from transformers import BitsAndBytesConfig, AutoConfig
from typing import Dict, Any, Union
import logging

logger = logging.getLogger(__name__)


class ModelQuantizer:
    """Model quantizer exposing several quantization strategies.

    All methods are static; the class acts as a namespace for building
    quantization configs, applying post-load quantization, measuring model
    size/compression, and benchmarking strategies against each other.
    """

    @staticmethod
    def get_quantization_config(quantization_type: str, config: Dict[str, Any] = None) -> Union[BitsAndBytesConfig, Dict[str, Any]]:
        """
        Build a quantization configuration for model loading.

        Args:
            quantization_type: one of '4bit', '8bit', 'fp16', 'bf16', 'dynamic'.
            config: optional overrides — '4bit' reads 'use_double_quant',
                'quant_type' ('fp4'/'nf4') and 'compute_dtype' (torch dtype
                name); '8bit' reads 'threshold'.

        Returns:
            A ``BitsAndBytesConfig`` for '4bit'/'8bit'; a kwargs dict for
            'fp16'/'bf16'; a marker dict for 'dynamic' (applied after load);
            an empty dict for unknown types (a warning is logged).
        """
        if config is None:
            config = {}

        logger.info(f"获取量化配置: {quantization_type}")

        if quantization_type == '4bit':
            return BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=config.get('use_double_quant', True),
                bnb_4bit_quant_type=config.get('quant_type', 'nf4'),  # 'fp4' or 'nf4'
                # Resolve the dtype name (e.g. 'float16') to the torch dtype.
                bnb_4bit_compute_dtype=getattr(torch, config.get('compute_dtype', 'float16'))
            )
        if quantization_type == '8bit':
            return BitsAndBytesConfig(
                load_in_8bit=True,
                llm_int8_threshold=config.get('threshold', 6.0)
            )
        if quantization_type == 'fp16':
            return {'torch_dtype': torch.float16}
        if quantization_type == 'bf16':
            return {'torch_dtype': torch.bfloat16}
        if quantization_type == 'dynamic':
            # Dynamic quantization cannot be expressed as a load-time config;
            # callers apply it after loading via quantize_model().
            return {'dynamic_quantization': True}

        logger.warning(f"不支持的量化类型: {quantization_type}，返回空配置")
        return {}

    @staticmethod
    def quantize_model(model, quantization_type: str, config: Dict[str, Any] = None) -> torch.nn.Module:
        """
        Quantize an already-loaded model.

        Args:
            model: the model to quantize.
            quantization_type: 'dynamic', 'int8' or 'uint8'; any other value
                is assumed to be a load-time strategy and leaves the model
                untouched.
            config: optional overrides — 'dynamic' reads 'dtype' (torch dtype
                name, default 'qint8'); 'int8'/'uint8' read 'backend'
                (default 'fbgemm').

        Returns:
            The quantized model, or the original model unchanged when the
            strategy must be applied at load time or quantization fails
            (best-effort: errors are logged, never raised).
        """
        if config is None:
            config = {}

        logger.info(f"对模型应用量化: {quantization_type}")

        try:
            if quantization_type == 'dynamic':
                # Dynamic quantization of Linear layers only.
                return torch.quantization.quantize_dynamic(
                    model,
                    {torch.nn.Linear},  # layer types to quantize
                    dtype=getattr(torch, config.get('dtype', 'qint8'))
                )
            if quantization_type in ('int8', 'uint8'):
                # Static quantization: prepare in place, convert to a copy.
                model.eval()
                model.qconfig = torch.quantization.get_default_qconfig(config.get('backend', 'fbgemm'))
                torch.quantization.prepare(model, inplace=True)
                # NOTE(review): static quantization needs a calibration pass
                # over representative data between prepare() and convert();
                # without it the activation ranges are meaningless.
                return torch.quantization.convert(model, inplace=False)

            logger.warning(f"量化类型 '{quantization_type}' 需要在模型加载时应用")
            return model
        except Exception as e:
            # Best-effort fallback: return the unquantized model.
            logger.error(f"模型量化失败: {str(e)}")
            return model

    @staticmethod
    def get_quantized_model_size(model, original_size: float = None) -> Dict[str, float]:
        """
        Compute a model's (approximate) in-memory size and compression rate.

        Args:
            model: the (possibly quantized) model.
            original_size: original model size in MB; when given, the
                compression rate is original_size / current size.

        Returns:
            Dict with 'size_mb' and 'compression_rate' (1.0 when no
            original size is supplied or the current size is zero).
        """
        # Approximate size: parameter bytes + buffer bytes.
        param_size = sum(p.nelement() * p.element_size() for p in model.parameters())
        buffer_size = sum(b.nelement() * b.element_size() for b in model.buffers())

        size_mb = (param_size + buffer_size) / 1024**2

        result = {
            'size_mb': size_mb,
            'compression_rate': 1.0
        }

        # Guard against division by zero (e.g. a model with no parameters
        # or parameters held off-device reporting zero bytes).
        if original_size is not None and size_mb > 0:
            result['compression_rate'] = original_size / size_mb

        logger.info(f"模型量化后大小: {size_mb:.2f} MB，压缩率: {result['compression_rate']:.2f}x")

        return result

    @staticmethod
    def compare_quantization_strategies(model, model_path: str, tokenizer, test_data: Any) -> Dict[str, Dict[str, Any]]:
        """
        Benchmark several quantization strategies against each other.

        Each strategy is loaded fresh from ``model_path``, timed for load and
        a single forward pass, and measured for size/compression relative to
        ``model``. Failures are captured per strategy, not raised.

        Args:
            model: the original (unquantized) model, used as size baseline.
            model_path: path/identifier to reload the model from.
            tokenizer: tokenizer used to build the test input.
            test_data: sequence of dicts; the first item's 'text' field is
                used as the inference sample (skipped when empty).

        Returns:
            Mapping of strategy name -> metrics dict ('load_time',
            'inference_time', 'size_mb', 'compression_rate', 'success'),
            or ('error', 'success': False) on failure.
        """
        from transformers import AutoModelForCausalLM
        import time

        results = {}
        quantization_types = ['none', '4bit', '8bit', 'fp16', 'dynamic']

        # Baseline size of the original model, in MB.
        original_size = ModelQuantizer.get_quantized_model_size(model)['size_mb']

        for q_type in quantization_types:
            try:
                logger.info(f"测试量化策略: {q_type}")

                start_time = time.time()

                if q_type == 'none':
                    # Load the unmodified full-precision model.
                    quant_model = AutoModelForCausalLM.from_pretrained(model_path)
                else:
                    quant_config = ModelQuantizer.get_quantization_config(q_type)
                    if 'dynamic_quantization' in quant_config:
                        # Load at full precision, then quantize dynamically.
                        quant_model = AutoModelForCausalLM.from_pretrained(model_path)
                        quant_model = ModelQuantizer.quantize_model(quant_model, q_type)
                    else:
                        # Apply quantization at load time.
                        quant_model = AutoModelForCausalLM.from_pretrained(
                            model_path,
                            quantization_config=quant_config if q_type in ['4bit', '8bit'] else None,
                            torch_dtype=quant_config.get('torch_dtype') if q_type in ['fp16', 'bf16'] else None,
                            device_map='auto'
                        )

                load_time = time.time() - start_time

                # Move to GPU when available. bitsandbytes-quantized models
                # are already placed by device_map and must not be moved.
                # NOTE(review): the hasattr fallback still moves 4/8-bit
                # models that lack 'is_loaded_in_4bit' — confirm intended.
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                if q_type not in ['4bit', '8bit'] or not hasattr(quant_model, 'is_loaded_in_4bit'):
                    quant_model = quant_model.to(device)

                # Single-sample inference timing.
                quant_model.eval()
                start_time = time.time()

                if test_data and len(test_data) > 0:
                    sample_text = test_data[0].get('text', '测试文本')
                    inputs = tokenizer(sample_text, return_tensors='pt').to(device)

                    with torch.no_grad():
                        quant_model(**inputs)

                inference_time = time.time() - start_time

                size_info = ModelQuantizer.get_quantized_model_size(quant_model, original_size)

                results[q_type] = {
                    'load_time': load_time,
                    'inference_time': inference_time,
                    'size_mb': size_info['size_mb'],
                    'compression_rate': size_info['compression_rate'],
                    'success': True
                }

            except Exception as e:
                logger.error(f"量化策略测试失败 {q_type}: {str(e)}")
                results[q_type] = {
                    'error': str(e),
                    'success': False
                }

        return results


# Module-level singleton shared by the convenience wrapper functions below.
model_quantizer = ModelQuantizer()


def get_quantization_config(quantization_type: str, config: Dict[str, Any] = None):
    """Convenience wrapper: delegate to the shared ModelQuantizer instance."""
    return model_quantizer.get_quantization_config(quantization_type, config)


def quantize_model(model, quantization_type: str, config: Dict[str, Any] = None):
    """Convenience wrapper: delegate to the shared ModelQuantizer instance."""
    return model_quantizer.quantize_model(model, quantization_type, config)


def get_quantized_model_size(model, original_size: float = None):
    """Convenience wrapper: delegate to the shared ModelQuantizer instance."""
    return model_quantizer.get_quantized_model_size(model, original_size)


def compare_quantization_strategies(model, model_path: str, tokenizer, test_data: Any):
    """Convenience wrapper: delegate to the shared ModelQuantizer instance."""
    return model_quantizer.compare_quantization_strategies(model, model_path, tokenizer, test_data)