import os
import sys
from pathlib import Path

# Legacy-style path bootstrap, made more robust: put the project root
# (the parent of this file's directory) on sys.path exactly once so the
# `config` / `dataloader` package imports below resolve when this file
# is run as a script.
PROJECT_ROOT_STR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if PROJECT_ROOT_STR not in sys.path:
    sys.path.insert(0, PROJECT_ROOT_STR)
from config.path_config import *
"""PPQ图形分派和路径分析模块"""
# NOTE(review): the string above is a no-op expression statement, not the
# module docstring (it is not the first statement in the file) — consider
# moving it above the imports.
# Restrict torch CUDA kernel builds to compute capability 7.5.
os.environ['TORCH_CUDA_ARCH_LIST'] = '7.5'
from collections import deque
from typing import Set, List, Tuple, Dict, Any, Optional
from ppq import BaseGraph, TargetPlatform, QuantizationSettingFactory
from ppq.api import (load_onnx_graph, dispatch_graph,
                     quantize_onnx_model,ENABLE_CUDA_KERNEL,
                     export_ppq_graph)
from config.quantization_config import QuantizationConfig
import torch.utils.data
from ppq.quantization.analyse.graphwise import graphwise_error_analyse
from ppq.quantization.analyse.layerwise import layerwise_error_analyse
from dataloader.CalibrationDatasetBuilder import CalibrationDatasetBuilder

class GraphLoader:
    """Loads ONNX models into PPQ graphs and sanity-checks their I/O."""

    @staticmethod
    def load_onnx_model(onnx_path: str, platform: TargetPlatform) -> BaseGraph:
        """Load an ONNX file and dispatch its operations onto *platform*.

        Raises FileNotFoundError when *onnx_path* does not exist.
        """
        if not os.path.exists(onnx_path):
            raise FileNotFoundError(f'ONNX 文件未找到: {onnx_path}')

        print('正在加载ONNX模型...')
        loaded = load_onnx_graph(onnx_import_file=onnx_path)
        return dispatch_graph(graph=loaded, platform=platform)

    @staticmethod
    def validate_graph_inputs(graph: BaseGraph, input_shapes: Dict[str, List[int]]) -> None:
        """Ensure every graph input has a declared shape and the graph has exactly one output.

        Raises KeyError for a missing input shape, ValueError for output-count mismatch.
        """
        for input_name in graph.inputs:
            if input_name not in input_shapes:
                raise KeyError(f'Graph Input {input_name} 需要指定有效shape.')
        if len(graph.outputs) != 1:
            raise ValueError('此脚本要求graph只能有1个输出。')

class PathFinder:
    """BFS utility that collects operations lying on same-prefix paths."""

    def __init__(self, graph: BaseGraph):
        self.graph = graph

    def find_path_operations(self, split_op, concat_op, prefix: str) -> Set:
        """Return the union of ops on every path from *split_op* to *concat_op*.

        Only successors whose names start with *prefix* are explored.
        Returns an empty set when no such path exists.
        """
        collected = set()
        reached_target = False
        frontier = deque([(split_op,)])

        while frontier:
            route = frontier.popleft()
            tail = route[-1]

            # Complete path found: absorb every op on it and stop extending.
            if tail == concat_op:
                reached_target = True
                collected.update(route)
                continue

            # Expand to downstream ops that share the prefix and are not
            # already on this route (avoids cycles).
            successors = (
                nxt
                for out_var in tail.outputs
                for nxt in self.graph.variables[out_var.name].dest_ops
            )
            for nxt in successors:
                if nxt.name.startswith(prefix) and nxt not in route:
                    frontier.append(route + (nxt,))

        return collected if reached_target else set()

class GraphDispatcher:
    """Dispatches selected graph operations onto alternative target platforms."""

    def __init__(self, graph: BaseGraph, quantization_setting):
        self.graph = graph
        self.qs = quantization_setting
        self.path_finder = PathFinder(graph)

    def assign_split_concat_path_to_fp32(self,
                                       prefix_key: str = 'Split',
                                       end_key: str = 'Concat',
                                       include_edge: bool = True,
                                       platform: TargetPlatform = TargetPlatform.FP32) -> None:
        """Dispatch every op on each same-prefix Split→Concat path to *platform*.

        Ops whose names contain *prefix_key* / *end_key* are grouped by the
        name fragment before that key; paths are searched within each group.
        When *include_edge* is False the boundary ops themselves are skipped.
        """
        all_ops = list(self.graph.operations.values())

        # Group start (Split) ops and end (Concat) ops by their shared name prefix.
        starts = [(op.name.split(prefix_key)[0], op)
                  for op in all_ops if prefix_key in op.name]
        ends_by_prefix = {}
        for op in all_ops:
            if end_key in op.name:
                ends_by_prefix.setdefault(op.name.split(end_key)[0], []).append(op)

        for prefix, start_op in starts:
            matching_ends = ends_by_prefix.get(prefix, [])
            if not matching_ends:
                print(f"!!! 未找到 Split {start_op.name} ({prefix}) 的同组 Concat，跳过")
                continue

            for end_op in matching_ends:
                path_ops = self.path_finder.find_path_operations(start_op, end_op, prefix)

                if not path_ops:
                    print(f"!!! Split {start_op.name} 到 Concat {end_op.name} 无可达路径，跳过")
                    continue

                # Dispatch every op on the path; optionally keep the two
                # boundary ops on their original platform.
                for member in path_ops:
                    if include_edge or (member != start_op and member != end_op):
                        self.qs.dispatching_table.append(member.name, platform)

                print(f'[PPQ分派] {start_op.name}→{end_op.name} 之间共{len(path_ops)}个op已调度为 {platform}')

    def apply_fp32_dispatch_for_sensitive_layers(self, sensitive_layers: List[Tuple[str, float]]) -> None:
        """Force each (name, error) sensitive layer onto the FP32 platform."""
        for layer_name, layer_error in sensitive_layers:
            self.qs.dispatching_table.append(operation=layer_name, platform=TargetPlatform.FP32)
            print(f"将高误差层 {layer_name} (误差: {layer_error:.6f}) 分派到 FP32")
            
class QuantizationSettingsManager:
    """Owns the PPQ quantization-setting object and its tuning knobs."""

    def __init__(self, config: QuantizationConfig):
        self.config = config
        self.qs = None  # created lazily by setup_basic_settings()

    def setup_basic_settings(self):
        """Create the default setting and apply fusion/LSQ flags from config."""
        setting = QuantizationSettingFactory.default_setting()
        setting.fusion = self.config.fusion
        setting.lsq_optimization = self.config.lsq_optimization
        self.qs = setting
        return setting

    def apply_advanced_optimization_settings(self,
                                           equalization: bool = True,
                                           opt_level: int = 2,
                                           eq_iterations: int = 10,
                                           value_threshold: float = 0.5,
                                           lsq_block_size: int = 8,
                                           lsq_lr: float = 1e-5,
                                           lsq_steps: int = 50) -> None:
        """Configure equalization and LSQ hyper-parameters on the setting.

        Raises ValueError if setup_basic_settings() has not been called.
        """
        if self.qs is None:
            raise ValueError("请先调用setup_basic_settings()初始化设置")

        # Equalization knobs.
        self.qs.equalization = equalization
        eq = self.qs.equalization_setting
        eq.opt_level = opt_level
        eq.iterations = eq_iterations
        eq.value_threshold = value_threshold

        # LSQ (learned step size quantization) knobs — always enabled here.
        self.qs.lsq_optimization = True
        lsq = self.qs.lsq_optimization_setting
        lsq.block_size = lsq_block_size
        lsq.collecting_device = self.config.device
        lsq.is_scale_trainable = True
        lsq.lr = lsq_lr
        lsq.steps = lsq_steps

    def set_calibration_algorithm(self, algorithm: str) -> None:
        """Select the activation calibration algorithm (e.g. 'kl', 'mse').

        Raises ValueError if setup_basic_settings() has not been called.
        """
        if self.qs is None:
            raise ValueError("请先调用setup_basic_settings()初始化设置")

        self.qs.quantize_activation_setting.calib_algorithm = algorithm
        print(f"设置校准算法为: {algorithm}")

    def get_settings(self):
        """Return the current quantization setting (None before setup)."""
        return self.qs
    
class PPQQuantizer:
    """Thin wrapper around ppq.api.quantize_onnx_model."""

    def __init__(self, config: QuantizationConfig):
        self.config = config

    def quantize_model(self, 
                      calib_loader: torch.utils.data.DataLoader,
                      quantization_setting,
                      collate_fn) -> BaseGraph:
        """Quantize the configured ONNX model with the given calibration data.

        Args:
            calib_loader: dataloader yielding calibration batches.
            quantization_setting: PPQ quantization setting controlling the pass.
            collate_fn: callable applied to each batch before inference.

        Returns:
            The quantized PPQ graph.

        Raises:
            RuntimeError: if quantization fails; the original exception is
                attached as ``__cause__`` so the root-cause traceback survives.
        """
        print('正在进行量化...')
        try:
            with ENABLE_CUDA_KERNEL():
                quantized_graph = quantize_onnx_model(
                    onnx_import_file=self.config.onnx_path,
                    calib_dataloader=calib_loader,
                    calib_steps=self.config.calib_steps,
                    do_quantize=True,
                    # NOTE(review): assumes the single network input is named
                    # 'images' — confirm against config.input_shapes.
                    input_shape=self.config.input_shapes['images'],
                    setting=quantization_setting,
                    collate_fn=collate_fn,
                    platform=self.config.quant_platform,
                    device=self.config.device,
                    verbose=0
                )
                return quantized_graph
        except Exception as e:
            # Fix: chain the original exception (`from e`) instead of
            # discarding it — callers keep the root-cause traceback.
            raise RuntimeError("模型量化阶段出错: " + str(e)) from e

class QuantizationAnalyzer:
    """Runs PPQ layerwise / graphwise quantization error analyses."""

    def __init__(self, config: QuantizationConfig):
        self.config = config
        self.quantizer = PPQQuantizer(config)

    def analyze_layer_sensitivity(self, 
                                 quantized_graph: BaseGraph,
                                 calib_loader: torch.utils.data.DataLoader,
                                 collate_fn,
                                 top_k: int = 10) -> List[Tuple[str, float]]:
        """Return the *top_k* (op_name, error) pairs with the largest error."""
        print("[layerwise] 层级误差分析中...")
        with ENABLE_CUDA_KERNEL():
            reports = layerwise_error_analyse(
                graph=quantized_graph, 
                running_device=self.config.device,
                collate_fn=collate_fn, 
                dataloader=calib_loader
            )

        # Rank layers from largest to smallest per-layer error.
        ranked = sorted(reports.items(), key=lambda pair: pair[1], reverse=True)
        return ranked[:top_k]

    def analyze_graph_wise_error(self,
                                quantized_graph: BaseGraph,
                                calib_loader: torch.utils.data.DataLoader,
                                collate_fn) -> Any:
        """Run graph-level error analysis and return its report."""
        print("[graphwise] 图级误差分析中...")
        with ENABLE_CUDA_KERNEL():
            report = graphwise_error_analyse(
                graph=quantized_graph, 
                running_device=self.config.device,
                collate_fn=collate_fn, 
                dataloader=calib_loader
            )

        return report

class CalibrationOptimizer:
    """Searches for the calibration algorithm with the lowest graphwise error."""

    def __init__(self, config: QuantizationConfig):
        self.config = config
        self.quantizer = PPQQuantizer(config)
        self.analyzer = QuantizationAnalyzer(config)

    def optimize_calibration_algorithms(self,
                                      calib_loader: torch.utils.data.DataLoader,
                                      quantization_setting,
                                      collate_fn,
                                      algorithms: Optional[List[str]] = None) -> str:
        """Try each calibration algorithm and return the best one.

        Each candidate is quantized with a deep copy of *quantization_setting*
        and scored by the maximum graphwise error; the smallest error wins.

        Args:
            calib_loader: calibration dataloader.
            quantization_setting: baseline PPQ setting; never mutated.
            collate_fn: batch collation callable.
            algorithms: candidate algorithm names; defaults to
                ['minmax', 'percentile', 'kl', 'mse'].

        Returns:
            The best algorithm name, or 'percentile' if every candidate failed.
        """
        import copy  # local import: only this method needs it

        if algorithms is None:
            algorithms = ['minmax', 'percentile', 'kl', 'mse']

        best_algorithm = None
        best_error = float('inf')

        for calib_algo in algorithms:
            try:
                print(f"测试校准算法: {calib_algo}")

                # Fix: the original aliased the setting (`temp = setting`)
                # despite the comment claiming a copy, so every iteration
                # mutated the caller's setting object. Deep-copy instead.
                temp_setting = copy.deepcopy(quantization_setting)
                temp_setting.quantize_activation_setting.calib_algorithm = calib_algo

                # Quantize with the candidate algorithm.
                temp_quantized = self.quantizer.quantize_model(
                    calib_loader, temp_setting, collate_fn
                )

                # Score the candidate by graphwise error.
                print(f'Error Report of Algorithm {calib_algo}: ')
                reports = self.analyzer.analyze_graph_wise_error(
                    temp_quantized, calib_loader, collate_fn
                )

                # reports maps layer/output name -> error value; score on the worst.
                if isinstance(reports, dict) and len(reports) > 0:
                    current_error = max(reports.values())
                else:
                    current_error = 0.0

                if current_error < best_error:
                    best_error = current_error
                    best_algorithm = calib_algo

            except Exception as e:
                # Best-effort search: a failing candidate is reported and skipped.
                print(f"算法 {calib_algo} 执行失败: {e}")
                continue

        return best_algorithm or 'percentile'

class ModelExporter:
    """Exports a quantized PPQ graph plus its quantization config to disk."""

    def __init__(self, config: QuantizationConfig):
        self.config = config

    def export_quantized_model(self, quantized_graph: BaseGraph) -> None:
        """Write the quantized ONNX model and PPQ config to the configured paths.

        Fix: the original called os.makedirs(os.path.dirname(path)), which
        raises when the output path has no directory component (dirname '');
        Path.parent yields '.' in that case, which mkdir accepts.
        """
        print(f"导出量化ONNX模型: {self.config.onnx_output_path}")

        # Ensure both output directories exist ('.' when the path is bare).
        Path(self.config.onnx_output_path).parent.mkdir(parents=True, exist_ok=True)
        Path(self.config.ppq_json_output_path).parent.mkdir(parents=True, exist_ok=True)

        export_ppq_graph(
            graph=quantized_graph,
            platform=self.config.quant_platform,
            graph_save_to=self.config.onnx_output_path,
            config_save_to=self.config.ppq_json_output_path
        )

        print(f"模型已导出到: {self.config.onnx_output_path}")
        print(f"配置已导出到: {self.config.ppq_json_output_path}")
         
class QuantizationPipeline:
    """End-to-end PPQ quantization pipeline orchestrating all components."""

    def __init__(self, config: QuantizationConfig):
        self.config = config
        self.graph = None            # loaded ONNX graph
        self.quantized_graph = None  # result of quantization
        self.calib_loader = None     # calibration dataloader
        # Fix: qs was never initialized here although several methods read it;
        # initialize to None so out-of-order calls fail predictably.
        self.qs = None

        # Instantiate collaborating components.
        self.dataset_builder = CalibrationDatasetBuilder(config)
        self.settings_manager = QuantizationSettingsManager(config)
        self.quantizer = PPQQuantizer(config)
        self.analyzer = QuantizationAnalyzer(config)
        self.calib_optimizer = CalibrationOptimizer(config)
        self.exporter = ModelExporter(config)

    def load_model(self) -> BaseGraph:
        """Load and dispatch the configured ONNX model."""
        self.graph = GraphLoader.load_onnx_model(
            self.config.onnx_path, 
            self.config.quant_platform
        )
        return self.graph

    def validate_graph_inputs(self) -> None:
        """Validate that all graph inputs have shapes and there is one output."""
        GraphLoader.validate_graph_inputs(self.graph, self.config.input_shapes)

    def setup_quantization_settings(self) -> None:
        """Create the base quantization settings via the settings manager."""
        self.qs = self.settings_manager.setup_basic_settings()

    def setup_calibration_dataset(self) -> torch.utils.data.DataLoader:
        """Build and cache the calibration dataloader."""
        self.calib_loader = self.dataset_builder.build_dataset()
        return self.calib_loader

    def collate_fn(self, batch: torch.Tensor) -> torch.Tensor:
        """Delegate batch collation to the dataset builder."""
        return self.dataset_builder.collate_fn(batch)

    def assign_split_concat_path_to_fp32(self, 
                                       prefix_key: str = 'Split', 
                                       end_key: str = 'Concat', 
                                       include_edge: bool = True) -> None:
        """Dispatch ops on Split→Concat paths to FP32.

        (Fix: the original docstring incorrectly said FP16 — the dispatcher
        assigns TargetPlatform.FP32.)
        """
        dispatcher = GraphDispatcher(self.graph, self.qs)
        dispatcher.assign_split_concat_path_to_fp32(prefix_key, end_key, include_edge)

    def perform_basic_quantization(self) -> BaseGraph:
        """Quantize the model with the current settings and cache the result."""
        self.quantized_graph = self.quantizer.quantize_model(
            self.calib_loader, 
            self.qs, 
            self.collate_fn
        )
        return self.quantized_graph

    def analyze_layer_sensitivity(self, top_k: int = 10) -> List[Tuple[str, float]]:
        """Return the *top_k* (op_name, error) pairs with the largest error."""
        return self.analyzer.analyze_layer_sensitivity(
            self.quantized_graph,
            self.calib_loader,
            self.collate_fn,
            top_k
        )

    def apply_fp32_dispatch_for_sensitive_layers(self, sensitive_layers: List[Tuple[str, float]]) -> None:
        """Dispatch the given high-error layers to FP32."""
        dispatcher = GraphDispatcher(self.graph, self.qs)
        dispatcher.apply_fp32_dispatch_for_sensitive_layers(sensitive_layers)

    def optimize_calibration_algorithms(self, algorithms: Optional[List[str]] = None) -> str:
        """Search candidate calibration algorithms and return the best one."""
        return self.calib_optimizer.optimize_calibration_algorithms(
            self.calib_loader,
            self.qs,
            self.collate_fn,
            algorithms
        )

    def apply_advanced_optimization(self, 
                                  equalization: bool = True,
                                  opt_level: int = 1,
                                  eq_iterations: int = 10,
                                  value_threshold: float = 0.5,
                                  lsq_block_size: int = 8,
                                  lsq_lr: float = 1e-5,
                                  lsq_steps: int = 5000) -> BaseGraph:
        """Apply advanced equalization/LSQ settings, re-quantize, and analyze.

        Returns the re-quantized graph.
        """
        # Push the advanced knobs into the shared quantization setting.
        self.settings_manager.apply_advanced_optimization_settings(
            equalization, opt_level, eq_iterations, value_threshold,
            lsq_block_size, lsq_lr, lsq_steps
        )

        # Re-quantize with the updated settings.
        self.quantized_graph = self.quantizer.quantize_model(
            self.calib_loader,
            self.qs,
            self.collate_fn
        )

        # Final graphwise error analysis (its report is printed as a side
        # effect; the return value was unused, so it is no longer bound).
        self.analyzer.analyze_graph_wise_error(
            self.quantized_graph,
            self.calib_loader,
            self.collate_fn
        )

        return self.quantized_graph

    def export_quantized_model(self) -> None:
        """Export the cached quantized graph via the exporter."""
        self.exporter.export_quantized_model(self.quantized_graph)

    def run_complete_pipeline(self, enable_advanced_optimization: bool = True) -> BaseGraph:
        """Run the full 10-step quantization pipeline and return the graph."""
        print("=" * 50)
        print("开始PPQ量化流水线")
        print("=" * 50)

        # 1. Quantization settings
        print("步骤 1/10: 设置量化参数")
        self.setup_quantization_settings()

        # 2. Load model
        print("步骤 2/10: 加载ONNX模型")
        self.load_model()

        # 3. Validate inputs
        print("步骤 3/10: 验证模型输入")
        self.validate_graph_inputs()

        # 4. Keep Split→Concat paths in FP32
        print("步骤 4/10: 设置Split-Concat路径")
        self.assign_split_concat_path_to_fp32()

        # 5. Calibration dataset
        print("步骤 5/10: 构建校准数据集")
        self.setup_calibration_dataset()

        # 6. Basic quantization
        print("步骤 6/10: 执行基础量化")
        self.perform_basic_quantization()

        # 7. Sensitivity analysis + FP32 fallback for high-error layers
        print("步骤 7/10: 分析层敏感性")
        sensitive_layers = self.analyze_layer_sensitivity(top_k=10)
        self.apply_fp32_dispatch_for_sensitive_layers(sensitive_layers)

        # 8. Calibration algorithm search
        print("步骤 8/10: 优化校准算法")
        best_algorithm = self.optimize_calibration_algorithms()
        self.settings_manager.set_calibration_algorithm(best_algorithm)
        print(f"选择最佳校准算法: {best_algorithm}")

        # 9. Advanced optimization (optional)
        print("步骤 9/10: 应用高级优化" if enable_advanced_optimization else "步骤 9/10: 重新量化")
        if enable_advanced_optimization:
            self.apply_advanced_optimization()
        else:
            # Re-quantize so the best calibration algorithm takes effect.
            self.perform_basic_quantization()

        # 10. Export
        print("步骤 10/10: 导出量化模型")
        self.export_quantized_model()

        print("=" * 50)
        print("PPQ量化流水线完成！")
        print("=" * 50)

        return self.quantized_graph
    
    