import os
import sys
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir)
sys.path.insert(0, os.path.abspath(os.path.join(cur_dir, '..')))

os.environ['TORCH_CUDA_ARCH_LIST'] = '7.5'
import torch
import numpy as np
from typing import Dict, Iterable, List, Optional, Tuple
from dataclasses import dataclass
from pathlib import Path
from myppq.dataloader.dataloader import ImageFolderNoClass

# === ppq及相关包导入 ===
from ppq import (
    BaseGraph, QuantizationSettingFactory, TargetPlatform,
    convert_any_to_numpy, torch_snr_error
)
from ppq.api import (
    dispatch_graph, export_ppq_graph, load_onnx_graph, quantize_onnx_model
)
from ppq.core.data import convert_any_to_torch_tensor
from ppq.executor.torch import TorchExecutor
from ppq.quantization.analyse.graphwise import graphwise_error_analyse
from ppq.quantization.analyse.layerwise import layerwise_error_analyse
from ppq import *
from ppq.api import *

from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import torch.utils.data

@dataclass
class QuantizationConfig:
    """Quantization configuration parameters for the PPQ pipeline."""
    # Runtime / target settings.
    device: str = 'cuda'
    quant_platform: TargetPlatform = TargetPlatform.TRT_INT8
    # Map of graph input name -> NCHW shape; filled with a YOLO-style
    # default in __post_init__ when left as None.
    input_shapes: Optional[Dict[str, List[int]]] = None
    batch_size: int = 1
    calib_steps: int = 32
    num_workers: int = 8
    
    # File path configuration (empty string -> derived defaults below).
    onnx_path: str = ""
    onnx_output_path: str = ""
    ppq_json_output_path: str = ""
    data_dir: str = ""
    
    # Quantization switches.
    fusion: bool = True
    lsq_optimization: bool = False
    
    def __post_init__(self) -> None:
        # Default single-image 3x640x640 input under the 'images' input name.
        if self.input_shapes is None:
            self.input_shapes = {'images': [1, 3, 640, 640]}
        
        # NOTE(review): MODELS_DIR / DATA_DIR are not defined in this file;
        # presumably they are pulled in by the wildcard imports above —
        # confirm they actually resolve at runtime.
        if not self.onnx_path:
            self.onnx_path = os.path.join(MODELS_DIR, 'onnx/yolov11n.onnx')
        if not self.onnx_output_path:
            self.onnx_output_path = os.path.join(MODELS_DIR, 'onnx/weights/yolov11n_int8.onnx')
        if not self.ppq_json_output_path:
            self.ppq_json_output_path = os.path.join(MODELS_DIR, 'onnx/weights/yolov11n_int8.json')
        if not self.data_dir:
            self.data_dir = os.path.join(DATA_DIR, 'coco128/images/train2017')

class QuantizationPipeline:
    """End-to-end PPQ quantization pipeline.

    Orchestrates the full flow: load the ONNX graph, build a calibration
    dataset, quantize, analyse layer-wise sensitivity, search for the best
    calibration algorithm, optionally apply advanced optimization passes,
    and export the quantized model plus its PPQ config.
    """

    def __init__(self, config: QuantizationConfig):
        self.config = config
        self.graph = None            # BaseGraph loaded from the ONNX file
        self.quantized_graph = None  # result of quantize_onnx_model
        self.calib_loader = None     # DataLoader over calibration images

    def setup_quantization_settings(self) -> None:
        """Create the PPQ QuantizationSetting and apply config switches."""
        self.qs = QuantizationSettingFactory.default_setting()
        self.qs.fusion = self.config.fusion
        self.qs.lsq_optimization = self.config.lsq_optimization

    def load_model(self) -> BaseGraph:
        """Load the ONNX model and dispatch it to the target platform.

        Returns:
            The dispatched ``BaseGraph`` (also stored on ``self.graph``).

        Raises:
            FileNotFoundError: if ``config.onnx_path`` does not exist.
        """
        if not os.path.exists(self.config.onnx_path):
            raise FileNotFoundError(f'ONNX 文件未找到: {self.config.onnx_path}')

        print('正在加载ONNX模型...')
        self.graph = load_onnx_graph(onnx_import_file=self.config.onnx_path)
        self.graph = dispatch_graph(graph=self.graph, platform=self.config.quant_platform)
        return self.graph

    def validate_graph_inputs(self) -> None:
        """Check every graph input has a configured shape and the graph has
        exactly one output (a requirement of this script).

        Raises:
            KeyError: if a graph input has no entry in ``config.input_shapes``.
            ValueError: if the graph does not have exactly one output.
        """
        for name in self.graph.inputs:
            if name not in self.config.input_shapes:
                raise KeyError(f'Graph Input {name} 需要指定有效shape.')
        if len(self.graph.outputs) != 1:
            raise ValueError('此脚本要求graph只能有1个输出。')

    def setup_calibration_dataset(self) -> torch.utils.data.DataLoader:
        """Build the calibration DataLoader over ``config.data_dir``.

        Images are resized to 640x640 and converted to tensors; no mean/std
        normalization is applied (assumes the model expects raw [0,1] input
        — confirm against the training preprocessing).

        Raises:
            FileNotFoundError: if the data directory is missing.
            RuntimeError: if the dataset or loader cannot be constructed.
        """
        preprocess = transforms.Compose([
            transforms.Resize((640, 640)),
            transforms.ToTensor(),
        ])

        if not os.path.isdir(self.config.data_dir):
            raise FileNotFoundError(f'数据目录未找到: {self.config.data_dir}')

        try:
            calib_dataset = ImageFolderNoClass(self.config.data_dir, transform=preprocess)
            self.calib_loader = torch.utils.data.DataLoader(
                calib_dataset,
                batch_size=self.config.batch_size,
                pin_memory=True,
                num_workers=self.config.num_workers
            )
            return self.calib_loader
        except Exception as e:
            # Chain the original error so the root cause stays visible.
            raise RuntimeError("校准数据集加载失败: " + str(e)) from e

    def collate_fn(self, batch: torch.Tensor) -> torch.Tensor:
        """Move a calibration batch onto the configured device."""
        return batch.to(self.config.device)

    def assign_split_concat_path_to_fp32(self, prefix_key: str = 'Split',
                                       end_key: str = 'Concat',
                                       include_edge: bool = True,
                                       platform: TargetPlatform = TargetPlatform.FP16) -> None:
        """Dispatch every op on a Split→Concat path to ``platform``.

        NOTE(review): despite the method name, the default target platform
        is FP16, not FP32 — pass ``platform=TargetPlatform.FP32`` explicitly
        if FP32 is actually intended.

        Split and Concat ops are grouped by the portion of their name that
        precedes ``prefix_key`` / ``end_key``; for each same-prefix pair,
        every op on any path between them is added to the dispatching table.

        Args:
            prefix_key: substring identifying Split ops by name.
            end_key: substring identifying Concat ops by name.
            include_edge: whether the Split/Concat endpoints themselves
                are dispatched as well.
            platform: target platform for the dispatched ops.
        """
        # Collect Split and Concat ops keyed by their shared name prefix.
        split_ops = []
        concat_ops_dict = {}

        for op in self.graph.operations.values():
            if prefix_key in op.name:
                prefix = op.name.split(prefix_key)[0]
                split_ops.append((prefix, op))
            if end_key in op.name:
                prefix = op.name.split(end_key)[0]
                concat_ops_dict.setdefault(prefix, []).append(op)

        # For each Split, look up the Concat ops sharing its prefix.
        for prefix, split_op in split_ops:
            concat_ops = concat_ops_dict.get(prefix, [])
            if not concat_ops:
                print(f"!!! 未找到 Split {split_op.name} ({prefix}) 的同组 Concat，跳过")
                continue

            for concat_op in concat_ops:
                ops_on_path = self._find_path_operations(split_op, concat_op, prefix)

                if not ops_on_path:
                    print(f"!!! Split {split_op.name} 到 Concat {concat_op.name} 无可达路径，跳过")
                    continue

                # Honor include_edge: optionally skip the two endpoints.
                for op in ops_on_path:
                    if (not include_edge) and (op == split_op or op == concat_op):
                        continue
                    self.qs.dispatching_table.append(op.name, platform)

                print(f'[PPQ分派] {split_op.name}→{concat_op.name} 之间共{len(ops_on_path)}个op已调度为 {platform}')

    def _find_path_operations(self, split_op, concat_op, prefix: str) -> set:
        """BFS all paths from ``split_op`` to ``concat_op`` within ``prefix``.

        Returns:
            The union of ops appearing on any path (endpoints included),
            or an empty set when no path exists.  Only successors whose
            names start with ``prefix`` are explored.

        NOTE(review): this enumerates whole paths, which can grow quickly
        on graphs with many parallel branches; acceptable for the small
        Split/Concat neighborhoods this script targets.
        """
        from collections import deque

        ops_on_path = set()
        queue = deque([[split_op]])
        found_any_path = False

        while queue:
            path = queue.popleft()
            last_op = path[-1]

            if last_op == concat_op:
                found_any_path = True
                for op_in_path in path:
                    ops_on_path.add(op_in_path)
                continue

            # Expand to downstream ops sharing the prefix; skipping ops
            # already on this path prevents cycling.
            for output_var in last_op.outputs:
                for next_op in self.graph.variables[output_var.name].dest_ops:
                    if next_op.name.startswith(prefix) and next_op not in path:
                        queue.append(path + [next_op])

        return ops_on_path if found_any_path else set()

    def perform_basic_quantization(self) -> BaseGraph:
        """Run baseline quantization with the current setting object.

        Returns:
            The quantized ``BaseGraph`` (also stored on
            ``self.quantized_graph``).

        Raises:
            RuntimeError: if PPQ quantization fails.
        """
        print('正在进行量化...')
        try:
            with ENABLE_CUDA_KERNEL():
                self.quantized_graph = quantize_onnx_model(
                    onnx_import_file=self.config.onnx_path,
                    calib_dataloader=self.calib_loader,
                    calib_steps=self.config.calib_steps,
                    do_quantize=True,
                    input_shape=self.config.input_shapes['images'],
                    setting=self.qs,
                    collate_fn=self.collate_fn,
                    platform=self.config.quant_platform,
                    device=self.config.device,
                    verbose=0
                )
                return self.quantized_graph
        except Exception as e:
            raise RuntimeError("模型量化阶段出错: " + str(e)) from e

    def analyze_layer_sensitivity(self, top_k: int = 10) -> List[Tuple[str, float]]:
        """Run layer-wise error analysis and return the worst layers.

        Args:
            top_k: number of highest-error layers to return.

        Returns:
            ``(op_name, error)`` pairs sorted by error, largest first.
        """
        print("[layerwise] 层级误差分析中...")
        with ENABLE_CUDA_KERNEL():
            reports = layerwise_error_analyse(
                graph=self.quantized_graph,
                running_device=self.config.device,
                collate_fn=self.collate_fn,
                dataloader=self.calib_loader
            )

        # Sort per-layer errors from largest to smallest.
        sensitivity = sorted(reports.items(), key=lambda item: item[1], reverse=True)
        return sensitivity[:top_k]

    def apply_fp32_dispatch_for_sensitive_layers(self, sensitive_layers: List[Tuple[str, float]]) -> None:
        """Dispatch each given high-error layer to FP32.

        Note: this only edits the setting object; it takes effect on the
        next quantization run.
        """
        for op_name, error in sensitive_layers:
            self.qs.dispatching_table.append(operation=op_name, platform=TargetPlatform.FP32)
            print(f"将高误差层 {op_name} (误差: {error:.6f}) 分派到 FP32")

    def optimize_calibration_algorithms(self, algorithms: List[str] = None) -> str:
        """Try several calibration algorithms and return the best one.

        Each candidate algorithm is used for a full trial quantization,
        followed by a graph-wise error analysis; the algorithm with the
        lowest aggregate error wins.

        Note: on return, ``self.qs`` still holds the LAST algorithm tried —
        the caller is expected to assign the returned best algorithm back.

        Args:
            algorithms: candidate calibration algorithms; defaults to
                ``['minmax', 'percentile', 'kl', 'mse']``.

        Returns:
            Name of the best algorithm, or ``'percentile'`` if none succeeded.
        """
        if algorithms is None:
            algorithms = ['minmax', 'percentile', 'kl', 'mse']

        best_algorithm = None
        best_error = float('inf')

        for calib_algo in algorithms:
            try:
                self.qs.quantize_activation_setting.calib_algorithm = calib_algo

                with ENABLE_CUDA_KERNEL():
                    temp_quantized = quantize_onnx_model(
                        onnx_import_file=self.config.onnx_path,
                        calib_dataloader=self.calib_loader,
                        calib_steps=self.config.calib_steps,
                        do_quantize=True,
                        input_shape=self.config.input_shapes['images'],
                        setting=self.qs,
                        collate_fn=self.collate_fn,
                        platform=self.config.quant_platform,
                        device=self.config.device,
                        verbose=0
                    )

                    print(f'Error Report of Algorithm {calib_algo}: ')
                    reports = graphwise_error_analyse(
                        graph=temp_quantized,
                        running_device=self.config.device,
                        collate_fn=self.collate_fn,
                        dataloader=self.calib_loader
                    )

                    # BUGFIX: graphwise_error_analyse returns a per-output
                    # error dict (variable name -> SNR); the old code mapped
                    # any dict to 0.0, so the first algorithm always won the
                    # comparison below.  Aggregate the dict properly and
                    # treat an unusable report as infinitely bad.
                    if isinstance(reports, dict) and reports:
                        current_error = sum(reports.values()) / len(reports)
                    elif isinstance(reports, (int, float)):
                        current_error = float(reports)
                    else:
                        current_error = float('inf')

                    if current_error < best_error:
                        best_error = current_error
                        best_algorithm = calib_algo

            except Exception as e:
                # Best-effort search: skip algorithms that fail and keep going.
                print(f"算法 {calib_algo} 执行失败: {e}")
                continue

        return best_algorithm or 'percentile'

    def apply_advanced_optimization(self,
                                  equalization: bool = True,
                                  opt_level: int = 2,
                                  eq_iterations: int = 10,
                                  value_threshold: float = 0.5,
                                  lsq_block_size: int = 8,
                                  lsq_lr: float = 1e-5,
                                  lsq_steps: int = 50) -> BaseGraph:
        """Re-quantize with equalization and LSQ optimization enabled.

        Args:
            equalization: enable cross-layer weight equalization.
            opt_level: equalization optimization level.
            eq_iterations: equalization iteration count.
            value_threshold: equalization value threshold.
            lsq_block_size: LSQ block size.
            lsq_lr: LSQ learning rate.
            lsq_steps: LSQ training steps.

        Returns:
            The re-quantized graph (also stored on ``self.quantized_graph``).
        """
        # Equalization parameters.
        self.qs.equalization = equalization
        self.qs.equalization_setting.opt_level = opt_level
        self.qs.equalization_setting.iterations = eq_iterations
        self.qs.equalization_setting.value_threshold = value_threshold

        # LSQ optimization parameters (scale is trainable).
        self.qs.lsq_optimization = True
        self.qs.lsq_optimization_setting.block_size = lsq_block_size
        self.qs.lsq_optimization_setting.collecting_device = self.config.device
        self.qs.lsq_optimization_setting.is_scale_trainable = True
        self.qs.lsq_optimization_setting.lr = lsq_lr
        self.qs.lsq_optimization_setting.steps = lsq_steps

        with ENABLE_CUDA_KERNEL():
            self.quantized_graph = quantize_onnx_model(
                onnx_import_file=self.config.onnx_path,
                calib_dataloader=self.calib_loader,
                calib_steps=self.config.calib_steps,
                do_quantize=True,
                input_shape=self.config.input_shapes['images'],
                setting=self.qs,
                collate_fn=self.collate_fn,
                platform=self.config.quant_platform,
                device=self.config.device,
                verbose=0
            )

            # Final error analysis: kept for its printed report (the
            # returned value was never used).
            graphwise_error_analyse(
                graph=self.quantized_graph,
                running_device=self.config.device,
                collate_fn=self.collate_fn,
                dataloader=self.calib_loader
            )

        return self.quantized_graph

    def export_quantized_model(self) -> None:
        """Export the quantized graph and its PPQ config to disk."""
        print(f"导出量化ONNX模型: {self.config.onnx_output_path}")

        # Make sure the output directories exist.
        os.makedirs(os.path.dirname(self.config.onnx_output_path), exist_ok=True)
        os.makedirs(os.path.dirname(self.config.ppq_json_output_path), exist_ok=True)

        export_ppq_graph(
            graph=self.quantized_graph,
            platform=self.config.quant_platform,
            graph_save_to=self.config.onnx_output_path,
            config_save_to=self.config.ppq_json_output_path
        )

    def run_complete_pipeline(self, enable_advanced_optimization: bool = True) -> BaseGraph:
        """Run the full quantization pipeline end to end.

        Args:
            enable_advanced_optimization: when True, finish with the
                equalization + LSQ pass; otherwise re-run basic quantization
                so the best calibration algorithm takes effect.

        Returns:
            The final quantized graph.
        """
        # 1. Build the quantization setting.
        self.setup_quantization_settings()

        # 2. Load the model.
        self.load_model()

        # 3. Validate graph inputs/outputs.
        self.validate_graph_inputs()

        # 4. Dispatch Split→Concat paths (default platform is FP16 — see
        #    the NOTE on assign_split_concat_path_to_fp32).
        self.assign_split_concat_path_to_fp32()

        # 5. Build the calibration dataset.
        self.setup_calibration_dataset()

        # 6. Baseline quantization.
        self.perform_basic_quantization()

        # 7. Sensitivity analysis; dispatch the worst layers to FP32
        #    (takes effect on the re-quantization in step 8/9).
        sensitive_layers = self.analyze_layer_sensitivity(top_k=10)
        self.apply_fp32_dispatch_for_sensitive_layers(sensitive_layers)

        # 8. Search for the best calibration algorithm and lock it in.
        best_algorithm = self.optimize_calibration_algorithms()
        self.qs.quantize_activation_setting.calib_algorithm = best_algorithm
        print(f"选择最佳校准算法: {best_algorithm}")

        # 9. Optional advanced optimization; otherwise re-quantize so the
        #    chosen algorithm and FP32 dispatches are applied.
        if enable_advanced_optimization:
            self.apply_advanced_optimization()
        else:
            self.perform_basic_quantization()

        # 10. Export the result.
        self.export_quantized_model()

        return self.quantized_graph

def create_default_config() -> QuantizationConfig:
    """Build a QuantizationConfig populated entirely with default values."""
    config = QuantizationConfig()
    return config

def main():
    """Entry point — drives the refactored quantization pipeline."""
    config = create_default_config()

    # Adjust the defaults here if needed, e.g.:
    # config.calib_steps = 50
    # config.batch_size = 2

    pipeline = QuantizationPipeline(config)

    # Run the whole pipeline; re-raise on failure after logging.
    try:
        pipeline.run_complete_pipeline(enable_advanced_optimization=True)
        print("量化完成！")
    except Exception as e:
        print(f"量化过程出错: {e}")
        raise
    
def run_custom_pipeline():
    """Example of driving the pipeline step by step instead of end to end."""
    pipeline = QuantizationPipeline(create_default_config())

    pipeline.setup_quantization_settings()
    pipeline.load_model()
    pipeline.setup_calibration_dataset()

    # Basic quantization only — the advanced optimization passes are skipped.
    pipeline.perform_basic_quantization()

    # Custom sensitivity analysis: dispatch only the five worst layers.
    worst_layers = pipeline.analyze_layer_sensitivity(top_k=5)
    pipeline.apply_fp32_dispatch_for_sensitive_layers(worst_layers)

    # Export the quantized model and config.
    pipeline.export_quantized_model()

if __name__ == "__main__":
    # Run the full demo pipeline when executed as a script.
    main()