import os
import sys
import onnx
import onnx_graphsurgeon as gs
import numpy as np
import glob
import cv2
import onnxruntime as ort
from typing import List, Set, Dict, Tuple

# === Project path configuration ===
# Make the project root importable regardless of where this script is run from.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if PROJECT_ROOT not in sys.path:
    sys.path.insert(0, PROJECT_ROOT)

# Path configuration
NMS_ONNX = os.path.join(PROJECT_ROOT, 'onnx/yolov11n_nms.onnx')  # original model to split
SAVE_BEFORE = os.path.join(PROJECT_ROOT, 'onnx/nms_before.onnx')  # subgraph up to (and including) the split node
SAVE_AFTER = os.path.join(PROJECT_ROOT, 'onnx/nms_after.onnx')  # subgraph after the split node
DATA_DIR = os.path.join(PROJECT_ROOT, 'datasets/coco128/images/train2017')  # test images
LABEL_DIR = os.path.join(PROJECT_ROOT, 'datasets/coco128/labels/train2017')  # labels (unused here)

def ensure_dir(path):
    """Ensure the parent directory of *path* exists.

    Args:
        path: A file path; its directory component is created (with parents)
            if missing.

    Note:
        ``os.path.dirname`` returns ``''`` for a bare filename, and
        ``os.makedirs('')`` raises FileNotFoundError — so the empty
        dirname is skipped instead of passed through.
    """
    parent = os.path.dirname(path)
    if parent:  # bare filenames have no directory to create
        os.makedirs(parent, exist_ok=True)

def letterbox(img, new_shape=640, color=(114, 114, 114)):
    """Resize *img* preserving aspect ratio, then pad to *new_shape* (YOLO letterbox).

    Args:
        img: HWC image array.
        new_shape: Target size; an int is treated as (size, size).
        color: Padding color. An int is expanded to a gray (c, c, c) triple.
            Passing a bare int straight to cv2.copyMakeBorder would pad only
            the first channel, because OpenCV fills the missing Scalar
            components with 0.

    Returns:
        (padded image, scale ratio r, (dw, dh) half-padding per side).
    """
    shape = img.shape[:2]  # (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    if isinstance(color, int):
        # Normalize a scalar color so all three channels get padded equally.
        color = (color, color, color)
    # Scale so the image fits inside new_shape without distortion.
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
    img_resized = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # Split the leftover padding evenly between the two opposite sides.
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
    dw /= 2
    dh /= 2
    # The +/-0.1 rounding splits an odd pixel of padding deterministically.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img_padded = cv2.copyMakeBorder(img_resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img_padded, r, (dw, dh)

def preprocess_image(img_path, imgsz):
    """Load an image and prepare a normalized NCHW float tensor for inference.

    Args:
        img_path: Path to the image file.
        imgsz: Letterbox target size (int or (h, w)).

    Returns:
        (tensor, original BGR image, scale ratio, dw, dh).

    Raises:
        ValueError: If the image cannot be read.
    """
    original = cv2.imread(img_path)
    if original is None:
        raise ValueError(f"无法加载图像: {img_path}")

    rgb = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
    boxed, ratio, (pad_w, pad_h) = letterbox(rgb, new_shape=imgsz)

    tensor = boxed.astype(np.float32)
    tensor /= 255.0
    # HWC -> CHW, then prepend the batch dimension (NCHW).
    tensor = np.expand_dims(tensor.transpose(2, 0, 1), axis=0)
    return tensor, original, ratio, pad_w, pad_h

def create_intermediate_model(original_onnx_path, output_node_name, save_path):
    """Build and save a truncated model whose outputs are a given node's outputs.

    Args:
        original_onnx_path: Path of the full ONNX model to load.
        output_node_name: Name of the node whose outputs become model outputs.
        save_path: Where to write the truncated ONNX model.

    Returns:
        save_path, for convenience.

    Raises:
        ValueError: If no node with *output_node_name* exists.
    """
    print(f"创建中间模型，输出节点: {output_node_name}")

    graph = gs.import_onnx(onnx.load(original_onnx_path))

    # Locate the node whose outputs we want to expose.
    target_node = next((n for n in graph.nodes if n.name == output_node_name), None)
    if target_node is None:
        raise ValueError(f"找不到目标节点: {output_node_name}")

    # Re-wrap the graph with the target node's outputs as the model outputs;
    # cleanup() drops everything downstream that is no longer reachable.
    truncated = gs.Graph(
        nodes=graph.nodes,
        inputs=graph.inputs,
        outputs=list(target_node.outputs),
    )
    truncated.cleanup()

    onnx.save(gs.export_onnx(truncated), save_path)

    print(f"中间模型已保存: {save_path}")
    return save_path

def fix_resize_nodes(graph: gs.Graph) -> None:
    """Repair Resize nodes in-place while preserving their original values.

    ONNX Resize takes up to 4 inputs: X (data), roi, scales, sizes. After a
    graph split these optional inputs can be missing, empty, or — invalidly —
    both scales and sizes can be populated at once. For each Resize node:
      * the data input (X) is always kept; the node is skipped if it is gone,
      * a missing roi is replaced by an empty float32 constant,
      * if both scales and sizes are valid, sizes wins and scales is emptied,
      * if only scales is valid it is kept as-is (sizes omitted),
      * if neither is valid a default scales of [1, 1, 2, 2] is inserted
        (NOTE(review): assumes NCHW 2x upsampling — confirm for this model).

    Mutates *graph* directly; returns None.
    """
    print("修复Resize节点...")
    
    for node in graph.nodes:
        if node.op == "Resize":
            print(f"处理Resize节点: {node.name}, 当前输入数: {len(node.inputs)}")
            
            # Keep references to the original inputs so their data is not lost.
            original_inputs = []
            for i, inp in enumerate(node.inputs):
                if inp is not None:
                    original_inputs.append(inp)
                    if hasattr(inp, 'values') and inp.values is not None:
                        print(f"  输入{i}值: shape={np.array(inp.values).shape}, data={inp.values}")
                    elif hasattr(inp, 'name'):
                        print(f"  输入{i}名: {inp.name}")
                else:
                    original_inputs.append(None)
                    print(f"  输入{i}: None")
            
            # Repair only when inputs are missing or invalid.
            needs_fix = False
            
            # Check whether input 1 (roi) needs repair.
            if len(original_inputs) <= 1 or original_inputs[1] is None:
                needs_fix = True
                print(f"  需要修复ROI输入")
            
            # Check input 2 (scales) and input 3 (sizes).
            has_valid_scales = (len(original_inputs) > 2 and 
                              original_inputs[2] is not None and
                              hasattr(original_inputs[2], 'values') and
                              original_inputs[2].values is not None and
                              len(original_inputs[2].values) > 0)
            
            has_valid_sizes = (len(original_inputs) > 3 and 
                             original_inputs[3] is not None and
                             hasattr(original_inputs[3], 'values') and
                             original_inputs[3].values is not None and
                             len(original_inputs[3].values) > 0)
            
            # Neither valid scales nor valid sizes: repair needed.
            if not has_valid_scales and not has_valid_sizes:
                needs_fix = True
                print(f"  需要修复scales/sizes输入")
            
            # Both scales AND sizes present is also invalid per the ONNX spec.
            if has_valid_scales and has_valid_sizes:
                needs_fix = True
                print(f"  检测到同时有scales和sizes，需要修复以避免冲突")
            
            if not needs_fix:
                print(f"  节点{node.name}无需修复")
                continue
            
            print(f"  开始修复节点{node.name}")
            
            # Build the replacement input list.
            fixed_inputs = []
            
            # Input 0: X (data input) — keep unchanged.
            if len(original_inputs) > 0 and original_inputs[0] is not None:
                fixed_inputs.append(original_inputs[0])
            else:
                print(f"错误: Resize节点 {node.name} 缺少数据输入")
                continue
            
            # Input 1: roi — prefer the original value, else use an empty array.
            if (len(original_inputs) > 1 and original_inputs[1] is not None and
                hasattr(original_inputs[1], 'values')):
                fixed_inputs.append(original_inputs[1])
                print(f"  保留原始ROI: {original_inputs[1].values}")
            else:
                roi_constant = gs.Constant(
                    name=f"{node.name}_roi",
                    values=np.array([], dtype=np.float32)
                )
                fixed_inputs.append(roi_constant)
                print(f"  添加空ROI")
            
            # Key step: resolve the scales/sizes conflict, preferring sizes.
            if has_valid_sizes:
                # Valid sizes present: scales must be an empty placeholder.
                scales_constant = gs.Constant(
                    name=f"{node.name}_scales_empty",
                    values=np.array([], dtype=np.float32)
                )
                fixed_inputs.append(scales_constant)
                fixed_inputs.append(original_inputs[3])  # append the original sizes
                print(f"  使用原始sizes: {original_inputs[3].values}")
                print(f"  scales设为空以避免冲突")
                
            elif has_valid_scales:
                # Only scales is valid: keep it as-is.
                fixed_inputs.append(original_inputs[2])
                print(f"  保留原始scales: {original_inputs[2].values}")
                # Deliberately do NOT append a sizes input.
                
            else:
                # Neither present: fall back to a default scales constant.
                scales_constant = gs.Constant(
                    name=f"{node.name}_scales_default",
                    values=np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
                )
                fixed_inputs.append(scales_constant)
                print(f"  添加默认scales: [1.0, 1.0, 2.0, 2.0]")
            
            # Replace the node's inputs in place.
            node.inputs.clear()
            for inp in fixed_inputs:
                node.inputs.append(inp)
            
            print(f"  修复完成，输入数: {len(fixed_inputs)}")

def fix_graph_splitting_issues(graph, split_node_name):
    """Apply the common repairs needed after splitting a graph.

    Args:
        graph: The graphsurgeon Graph to repair (mutated in place).
        split_node_name: Name of the split point (logged only).

    Returns:
        The same graph object, after repairs.
    """
    print(f"修复图拆分问题，分割点: {split_node_name}")

    # 1. Report how many Constant nodes survived the split.
    n_constants = sum(1 for n in graph.nodes if n.op == "Constant")
    print(f"找到 {n_constants} 个常量节点")

    # 2. Sanity-check every Transpose node for a 'perm' attribute.
    for n in graph.nodes:
        if n.op != "Transpose":
            continue
        print(f"检查Transpose节点: {n.name}")
        if hasattr(n, 'attrs') and 'perm' in n.attrs:
            print(f"  原始perm: {n.attrs['perm']}")
        else:
            print(f"  警告: Transpose节点缺少perm属性")

    # 3. Repair Resize nodes, preserving their original parameters.
    fix_resize_nodes(graph)

    return graph

def split_model_improved(onnx_path, split_node_name):
    """Split an ONNX model into Before/After subgraphs at *split_node_name*.

    The Before model contains the split node plus all of its transitive
    producers; its outputs are the split node's outputs. The After model is
    built by cloning every remaining node with fresh Variable objects (so no
    references are shared with the original graph) and exposing, as inputs,
    every value it consumes that is produced inside the Before model or was
    an input of the original graph. Both subgraphs are passed through
    fix_graph_splitting_issues, cleaned up, and saved to SAVE_BEFORE /
    SAVE_AFTER.

    Args:
        onnx_path: Path to the original ONNX model.
        split_node_name: Name of the node at which to split.

    Returns:
        (before_model, after_model) as onnx ModelProto objects.

    Raises:
        ValueError: If no node named *split_node_name* exists in the graph.
    """
    
    print(f"开始改进的模型拆分: {split_node_name}")
    
    # Load the model.
    model = onnx.load(onnx_path)
    graph = gs.import_onnx(model)
    
    # Locate the split node.
    split_node = None
    for node in graph.nodes:
        if node.name == split_node_name:
            split_node = node
            break
    
    if split_node is None:
        raise ValueError(f"找不到拆分节点: {split_node_name}")
    
    print(f"找到拆分节点: {split_node.name}, 输出数: {len(split_node.outputs)}")
    
    # === Build the Before model ===
    print("创建Before模型...")
    
    # Collect the Before model's nodes (including the split node itself).
    before_node_names = set()
    
    def collect_before_nodes(node):
        # Recursively walk producers; the name set doubles as a visited set.
        if node.name in before_node_names:
            return
        before_node_names.add(node.name)
        
        for inp in node.inputs:
            if isinstance(inp, gs.Variable):
                for producer in graph.nodes:
                    if inp in producer.outputs:
                        collect_before_nodes(producer)
                        break
    
    collect_before_nodes(split_node)
    before_nodes_list = [node for node in graph.nodes if node.name in before_node_names]
    
    print(f"Before模型包含 {len(before_nodes_list)} 个节点")
    
    # The Before model's outputs are exactly the split node's outputs.
    before_outputs = list(split_node.outputs)
    
    # === Build the After model (full rewrite via cloning) ===
    print("创建After模型...")
    
    # 1. Every node NOT in the Before set belongs to the After model.
    after_nodes_list = [node for node in graph.nodes if node.name not in before_node_names]
    print(f"After模型初始包含 {len(after_nodes_list)} 个节点")
    
    # 2. Determine which external inputs the After model needs.
    required_external_inputs = {}  # {variable name: variable object}
    
    for node in after_nodes_list:
        for inp in node.inputs:
            if isinstance(inp, gs.Variable) and hasattr(inp, 'name'):
                # Is this input produced inside the After set itself?
                is_internal = False
                for after_node in after_nodes_list:
                    if any(hasattr(out, 'name') and out.name == inp.name for out in after_node.outputs):
                        is_internal = True
                        break
                
                if not is_internal:
                    # External input: check whether the Before model produces it.
                    producer_in_before = False
                    for before_node in before_nodes_list:
                        if any(hasattr(out, 'name') and out.name == inp.name for out in before_node.outputs):
                            producer_in_before = True
                            required_external_inputs[inp.name] = inp
                            break
                    
                    if not producer_in_before:
                        # Otherwise it may be an input of the original graph.
                        is_graph_input = any(hasattr(graph_inp, 'name') and graph_inp.name == inp.name 
                                           for graph_inp in graph.inputs)
                        if is_graph_input:
                            required_external_inputs[inp.name] = inp
    
    print(f"After模型需要 {len(required_external_inputs)} 个外部输入:")
    for name, var in required_external_inputs.items():
        print(f"  {name}: shape={var.shape}")
    
    # 3. Create fresh input Variables for the After model (avoids shared refs).
    after_inputs = []
    input_mapping = {}  # old variable name -> new variable object
    
    for name, original_var in required_external_inputs.items():
        new_var = gs.Variable(
            name=name,
            dtype=original_var.dtype,
            shape=original_var.shape
        )
        after_inputs.append(new_var)
        input_mapping[name] = new_var
        print(f"创建After输入: {name}")
    
    # 4. Clone the After nodes, rewiring inputs to the fresh variables.
    cloned_after_nodes = []
    var_mapping = input_mapping.copy()  # starts with the input mapping
    
    # Process nodes in topological order (dependencies cloned first).
    processed_nodes = set()
    
    def clone_node_with_deps(node):
        if node.name in processed_nodes:
            return
        
        # Clone dependencies first.
        for inp in node.inputs:
            if isinstance(inp, gs.Variable) and hasattr(inp, 'name'):
                # Find the node that produces this input, if any.
                for dep_node in after_nodes_list:
                    if any(hasattr(out, 'name') and out.name == inp.name for out in dep_node.outputs):
                        clone_node_with_deps(dep_node)
                        break
        
        # Clone the current node: map inputs through var_mapping where known.
        cloned_inputs = []
        for inp in node.inputs:
            if isinstance(inp, gs.Variable) and hasattr(inp, 'name') and inp.name in var_mapping:
                cloned_inputs.append(var_mapping[inp.name])
            else:
                cloned_inputs.append(inp)
        
        # Create fresh output variables and register them in the mapping.
        cloned_outputs = []
        for out in node.outputs:
            if hasattr(out, 'name'):
                new_out = gs.Variable(
                    name=out.name,
                    dtype=out.dtype,
                    shape=out.shape
                )
                cloned_outputs.append(new_out)
                var_mapping[out.name] = new_out
            else:
                cloned_outputs.append(out)
        
        # Build the cloned node itself.
        cloned_node = gs.Node(
            op=node.op,
            name=node.name,
            inputs=cloned_inputs,
            outputs=cloned_outputs,
            attrs=node.attrs.copy() if hasattr(node, 'attrs') else {}
        )
        
        cloned_after_nodes.append(cloned_node)
        processed_nodes.add(node.name)
        print(f"克隆节点: {node.name}")
    
    # Clone every After node.
    for node in after_nodes_list:
        clone_node_with_deps(node)
    
    print(f"成功克隆 {len(cloned_after_nodes)} 个After节点")
    
    # 5. Map the original graph outputs to their cloned counterparts.
    final_outputs = []
    for original_output in graph.outputs:
        if hasattr(original_output, 'name') and original_output.name in var_mapping:
            final_outputs.append(var_mapping[original_output.name])
        else:
            final_outputs.append(original_output)
    
    # 6. Assemble the Before and After graphs.
    before_graph = gs.Graph(
        nodes=before_nodes_list,
        inputs=graph.inputs,
        outputs=before_outputs
    )
    
    after_graph = gs.Graph(
        nodes=cloned_after_nodes,
        inputs=after_inputs,
        outputs=final_outputs
    )
    
    # 7. Apply the post-split repairs to both subgraphs.
    before_graph = fix_graph_splitting_issues(before_graph, split_node_name)
    after_graph = fix_graph_splitting_issues(after_graph, split_node_name)
    
    # 8. Clean up and save both models.
    print("清理Before模型...")
    before_graph.cleanup()
    
    print("清理After模型...")
    after_graph.cleanup()
    
    # Save the models.
    before_model = gs.export_onnx(before_graph)
    after_model = gs.export_onnx(after_graph)
    
    ensure_dir(SAVE_BEFORE)
    ensure_dir(SAVE_AFTER)
    
    onnx.save(before_model, SAVE_BEFORE)
    onnx.save(after_model, SAVE_AFTER)
    
    print("改进的模型拆分完成")
    print(f"Before模型保存至: {SAVE_BEFORE}")
    print(f"After模型保存至: {SAVE_AFTER}")
    
    return before_model, after_model

def validate_crop_point_output(original_onnx_path, before_onnx_path, test_images, split_node_name):
    """Compare outputs at the split point between the original and Before models.

    Builds a temporary copy of the original model truncated at
    *split_node_name*, runs it and the Before model on up to 5 test images,
    and reports per-output mean/max absolute differences. Outputs whose mean
    difference exceeds 1e-5 are dumped as .npy files under debug/ for
    inspection. The temporary truncated model is deleted afterwards.

    Args:
        original_onnx_path: Path to the full original ONNX model.
        before_onnx_path: Path to the Before submodel produced by the split.
        test_images: Image file paths; only the first 5 existing ones are used.
        split_node_name: Node name at which the original model is truncated.

    Returns:
        True if the max absolute difference over all valid images stayed
        below 1e-5; False otherwise (including when no image was usable).
    """
    
    print("=== 裁剪点输出验证 ===")
    
    # Build a version of the original model that outputs at the split point.
    temp_original_path = "temp_original_to_split.onnx"
    create_intermediate_model(original_onnx_path, split_node_name, temp_original_path)
    
    # Load both models.
    original_session = ort.InferenceSession(temp_original_path)
    before_session = ort.InferenceSession(before_onnx_path)
    
    # Log input/output signatures for debugging.
    print(f"原始模型(到分割点)输入: {[inp.name for inp in original_session.get_inputs()]}")
    print(f"原始模型(到分割点)输出: {[out.name for out in original_session.get_outputs()]}")
    print(f"Before模型输入: {[inp.name for inp in before_session.get_inputs()]}")
    print(f"Before模型输出: {[out.name for out in before_session.get_outputs()]}")
    
    total_diff = 0
    max_diff = 0
    valid_images = 0
    
    for i, img_file in enumerate(test_images[:5]):  # test the first 5 images only
        if not os.path.exists(img_file):
            print(f"跳过不存在的图片: {img_file}")
            continue
            
        print(f"\n--- 测试图片 {i+1}: {os.path.basename(img_file)} ---")
        
        try:
            # Preprocess.
            img_tensor, _, _, _, _ = preprocess_image(img_file, 640)
            
            # Run the original model (truncated at the split point).
            original_outputs = original_session.run(None, {original_session.get_inputs()[0].name: img_tensor})
            
            # Run the Before model.
            before_outputs = before_session.run(None, {before_session.get_inputs()[0].name: img_tensor})
            
            # Compare each output pairwise.
            if len(original_outputs) != len(before_outputs):
                print(f"  ⚠️ 警告: 输出数量不匹配 - 原始:{len(original_outputs)}, Before:{len(before_outputs)}")
                continue
            
            for j, (orig_out, before_out) in enumerate(zip(original_outputs, before_outputs)):
                if orig_out.shape != before_out.shape:
                    print(f"  ⚠️ 警告: 输出{j}形状不匹配 - 原始:{orig_out.shape}, Before:{before_out.shape}")
                    continue
                    
                diff = np.abs(orig_out - before_out)
                mean_diff = np.mean(diff)
                max_diff_curr = np.max(diff)
                
                print(f"  输出{j}: 平均差异={mean_diff:.8f}, 最大差异={max_diff_curr:.8f}")
                print(f"  输出{j}: 原始范围=[{orig_out.min():.6f}, {orig_out.max():.6f}]")
                print(f"  输出{j}: Before范围=[{before_out.min():.6f}, {before_out.max():.6f}]")
                
                total_diff += mean_diff
                max_diff = max(max_diff, max_diff_curr)
                
                if mean_diff > 1e-5:  # difference is too large
                    print(f"  ⚠️ 警告: 输出{j}差异过大!")
                    
                    # Dump detailed debug tensors under debug/ for offline analysis.
                    ensure_dir("debug/dummy")
                    np.save(f"debug/debug_original_output_{i}_{j}.npy", orig_out)
                    np.save(f"debug/debug_before_output_{i}_{j}.npy", before_out)
                    np.save(f"debug/debug_diff_{i}_{j}.npy", diff)
            
            valid_images += 1
            
        except Exception as e:
            print(f"  错误: 处理图片{img_file}时出错: {e}")
            continue
    
    # Remove the temporary truncated model.
    if os.path.exists(temp_original_path):
        os.remove(temp_original_path)
    
    if valid_images > 0:
        avg_diff = total_diff / valid_images
        print(f"\n总体评估:")
        print(f"有效图片数: {valid_images}")
        print(f"平均差异: {avg_diff:.8f}")
        print(f"最大差异: {max_diff:.8f}")
        
        return max_diff < 1e-5  # whether validation passed
    else:
        print("没有有效的测试图片")
        return False

def validate_end_to_end(original_onnx_path, before_onnx_path, after_onnx_path, test_images):
    """Validate the accuracy of the recomposed Before+After pipeline end-to-end.

    Runs the original model and the Before->After chain on up to 3 test
    images, feeding the Before model's outputs into the After model (matched
    by name, with an index-based fallback), and compares the final outputs.

    Args:
        original_onnx_path: Path to the full original ONNX model.
        before_onnx_path: Path to the Before submodel.
        after_onnx_path: Path to the After submodel.
        test_images: Image file paths; only the first 3 existing ones are used.

    Returns:
        True if the max absolute final-output difference stayed below 1e-4
        across all valid images; False otherwise (including no valid images).
    """
    
    print("=== 端到端验证 ===")
    
    # Load all three models.
    original_session = ort.InferenceSession(original_onnx_path)
    before_session = ort.InferenceSession(before_onnx_path)
    after_session = ort.InferenceSession(after_onnx_path)
    
    print(f"After模型输入: {[inp.name for inp in after_session.get_inputs()]}")
    print(f"Before模型输出: {[out.name for out in before_session.get_outputs()]}")
    
    total_diff = 0
    max_diff = 0
    valid_images = 0
    
    for i, img_file in enumerate(test_images[:3]):  # test the first 3 images only
        if not os.path.exists(img_file):
            continue
            
        print(f"\n--- 端到端测试图片 {i+1}: {os.path.basename(img_file)} ---")
        
        try:
            # Preprocess.
            img_tensor, _, _, _, _ = preprocess_image(img_file, 640)
            
            # Run the original model.
            original_outputs = original_session.run(None, {original_session.get_inputs()[0].name: img_tensor})
            
            # Run the first half of the split pipeline.
            before_outputs = before_session.run(None, {before_session.get_inputs()[0].name: img_tensor})
            
            # Build the After model's input feed dict.
            after_input_dict = {}
            after_input_names = [inp.name for inp in after_session.get_inputs()]
            before_output_names = [out.name for out in before_session.get_outputs()]
            
            # Match Before outputs to After inputs by tensor name.
            for j, before_output in enumerate(before_outputs):
                if j < len(before_output_names):
                    before_output_name = before_output_names[j]
                    if before_output_name in after_input_names:
                        after_input_dict[before_output_name] = before_output
                        print(f"  映射: {before_output_name} -> After输入")
            
            # Make sure every After input got a value.
            for after_input_name in after_input_names:
                if after_input_name not in after_input_dict:
                    print(f"  警告: After输入 {after_input_name} 没有对应的Before输出")
                    # Fallback matching by index.
                    # NOTE(review): this always assigns before_outputs[0] since it
                    # breaks on the first iteration — only safe for single-output splits.
                    for j, before_output in enumerate(before_outputs):
                        if j < len(after_input_names):
                            after_input_dict[after_input_name] = before_output
                            print(f"  使用索引匹配: 输出{j} -> {after_input_name}")
                            break
            
            if len(after_input_dict) != len(after_input_names):
                print(f"  错误: After输入数量不匹配，跳过此图片")
                continue
            
            after_outputs = after_session.run(None, after_input_dict)
            
            # Compare final outputs against the original model.
            for j, (orig_out, split_out) in enumerate(zip(original_outputs, after_outputs)):
                if orig_out.shape != split_out.shape:
                    print(f"  ⚠️ 警告: 最终输出{j}形状不匹配")
                    continue
                    
                diff = np.abs(orig_out - split_out)
                mean_diff = np.mean(diff)
                max_diff_curr = np.max(diff)
                
                print(f"  最终输出{j}: 平均差异={mean_diff:.8f}, 最大差异={max_diff_curr:.8f}")
                
                total_diff += mean_diff
                max_diff = max(max_diff, max_diff_curr)
                
                if mean_diff > 1e-4:
                    print(f"  ⚠️ 警告: 最终输出{j}差异过大!")
            
            valid_images += 1
            
        except Exception as e:
            print(f"  错误: 端到端测试出错: {e}")
            import traceback
            traceback.print_exc()
            continue
    
    if valid_images > 0:
        avg_diff = total_diff / valid_images
        print(f"\n端到端验证结果:")
        print(f"有效图片数: {valid_images}")
        print(f"平均差异: {avg_diff:.8f}")
        print(f"最大差异: {max_diff:.8f}")
        
        return max_diff < 1e-4
    else:
        return False

def main_validation():
    """Run the complete split-and-validate pipeline for the NMS model.

    Steps:
      1. Split NMS_ONNX at the "/Transpose" node into Before/After models.
      2. Collect test images from DATA_DIR (falls back to one random dummy
         image if none are found).
      3. Validate the Before model's output at the split point.
      4. Validate the recomposed pipeline end-to-end.

    Returns:
        True when end-to-end validation passes (even if the split-point
        check failed); False on any hard failure.
    """
    
    original_onnx = NMS_ONNX 
    split_node = "/Transpose"
    
    # Make sure the original model exists before doing anything.
    if not os.path.exists(original_onnx):
        print(f"错误: 原始模型文件不存在: {original_onnx}")
        return False
    
    # 1. Perform the improved model split.
    print("=== 开始模型拆分 ===")
    try:
        before_model, after_model = split_model_improved(original_onnx, split_node)
    except Exception as e:
        print(f"模型拆分失败: {e}")
        import traceback
        traceback.print_exc()
        return False
    
    # 2. Gather test images.
    test_images = glob.glob(os.path.join(DATA_DIR, "*.jpg"))
    if not test_images:
        print(f"警告: 在{DATA_DIR}中未找到测试图片")
        # Create a random dummy image so validation can still run.
        dummy_img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
        dummy_path = "dummy_test.jpg"
        cv2.imwrite(dummy_path, dummy_img)
        test_images = [dummy_path]
        print(f"创建虚拟测试图片: {dummy_path}")
    
    print(f"找到 {len(test_images)} 张测试图片")
    
    # 3. Validate the Before model's output accuracy at the split point.
    print("\n=== 验证Before模型输出精度 ===")
    try:
        is_valid = validate_crop_point_output(
            original_onnx, 
            SAVE_BEFORE, 
            test_images,
            split_node
        )
        
        if is_valid:
            print("✅ Before模型验证通过")
        else:
            print("❌ Before模型验证失败，但继续进行端到端验证")
    except Exception as e:
        print(f"Before模型验证出错: {e}")
        import traceback
        traceback.print_exc()
        is_valid = False
    
    # 4. Validate the full split-and-recompose accuracy.
    print("\n=== 验证完整拆分重组精度 ===")
    try:
        end_to_end_valid = validate_end_to_end(
            original_onnx,
            SAVE_BEFORE,
            SAVE_AFTER,
            test_images
        )
        
        if end_to_end_valid:
            print("✅ 端到端验证通过")
        else:
            print("❌ 端到端验证失败")
    except Exception as e:
        print(f"端到端验证出错: {e}")
        import traceback
        traceback.print_exc()
        end_to_end_valid = False
    
    # Remove the dummy image if one was created.
    if "dummy_test.jpg" in test_images:
        os.remove("dummy_test.jpg")
    
    # 5. Summarize.
    print("\n=== 验证总结 ===")
    if is_valid and end_to_end_valid:
        print("🎉 所有验证通过！模型拆分成功且精度保持良好")
        return True
    elif end_to_end_valid:
        print("⚠️ 端到端验证通过，但Before模型验证有问题")
        return True
    else:
        print("❌ 验证失败，需要进一步调试")
        return False

if __name__ == "__main__":
    # Script entry point: run the full pipeline and report the artifact paths.
    if main_validation():
        print(f"\n模型拆分完成！")
        print(f"Before模型: {SAVE_BEFORE}")
        print(f"After模型: {SAVE_AFTER}")
    else:
        print("\n模型拆分验证失败，请检查错误信息")