#!/usr/bin/env python3
"""
Visualize saved VGGT outputs from npz files.

Usage:
    python tools/visualize_saved_outputs.py \
  --npz-path /path/to/outputs.npz \
  --conf-thres 50.0 \
  --visualize \
  --port 8000
"""

import os
import sys
import glob
import torch
import numpy as np
import shutil
import argparse
import webbrowser
import threading
import json
from pathlib import Path
from datetime import datetime
from flask import Flask, request, jsonify, send_from_directory, send_file
from flask_cors import CORS
from io import BytesIO

# Add paths
SCRIPT_DIR = Path(__file__).resolve().parent
VGGT_ROOT = SCRIPT_DIR.parent
if str(VGGT_ROOT) not in sys.path:
    sys.path.insert(0, str(VGGT_ROOT))

from vggt.utils.pose_enc import pose_encoding_to_extri_intri
from vggt.utils.geometry import unproject_depth_map_to_point_map

# Import visual_util from the parent directory
sys.path.insert(0, str(VGGT_ROOT))
from visual_util import predictions_to_glb
try:
    from PIL import Image  # type: ignore
except Exception:
    Image = None
try:
    import matplotlib.cm as mpl_cm  # type: ignore
except Exception:
    mpl_cm = None


def load_saved_predictions(npz_path, metadata_path=None):
    """Load predictions from a saved npz file for visualization.

    Reconstructs ``predictions['pose_enc_list']`` from any number of
    ``pose_enc_list_item_<i>`` arrays (the saver writes list entries as
    separate keys), and reads the sidecar ``metadata.json`` next to the
    npz unless an explicit path is given.

    Args:
        npz_path: Path to the saved outputs ``.npz`` file.
        metadata_path: Optional explicit metadata JSON path; defaults to
            ``metadata.json`` in the same directory as ``npz_path``.

    Returns:
        Tuple ``(predictions, metadata, image_paths)`` where ``predictions``
        maps array names to numpy arrays, ``metadata`` is the parsed JSON
        dict (empty if the file is missing), and ``image_paths`` is the
        ``image_paths`` list from the metadata (empty list if absent).
    """
    print(f"Loading predictions from: {npz_path}")

    # Materialize every stored array; allow_pickle covers object arrays.
    data = np.load(npz_path, allow_pickle=True)
    predictions = {key: data[key] for key in data.files}

    # Reconstruct pose_enc_list from numbered item keys, in index order.
    # (Previously capped at 4 entries with range(4); now collects however
    # many items were saved, tolerating gaps in the numbering.)
    item_keys = sorted(
        (k for k in predictions if k.startswith("pose_enc_list_item_")),
        key=lambda k: int(k.rsplit("_", 1)[1]),
    )
    pose_enc_list = [predictions[k] for k in item_keys]
    if pose_enc_list:
        predictions['pose_enc_list'] = pose_enc_list

    # Load metadata if available (defaults to the sidecar metadata.json).
    if metadata_path is None:
        metadata_path = os.path.join(os.path.dirname(npz_path), "metadata.json")

    metadata = {}
    if os.path.exists(metadata_path):
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
        print(f"Loaded metadata from: {metadata_path}")
        print(f"  - Number of images: {metadata.get('num_images', 'unknown')}")
        print(f"  - Scene: {metadata.get('scene_name', 'unknown')}")

    # Get image paths from metadata
    image_paths = metadata.get('image_paths', [])

    return predictions, metadata, image_paths


def process_saved_outputs(npz_path: str, output_dir: str, conf_thres: float = 50.0, show_cam: bool = True,
                         prediction_mode: str = "Depthmap and Camera Branch"):
    """Turn a saved VGGT prediction npz into an exported GLB scene.

    Pipeline: load predictions + metadata, copy and resize the source
    images to the prediction resolution, derive extrinsic/intrinsic
    matrices from the pose encoding if they are absent, save a cleaned
    ``predictions.npz`` into ``output_dir``, then export a GLB via
    ``predictions_to_glb``.

    Args:
        npz_path: Path to the saved outputs npz (see load_saved_predictions).
        output_dir: Directory receiving ``images/``, ``predictions.npz``
            and the exported ``.glb``.
        conf_thres: Confidence threshold (percent) forwarded to the GLB export.
        show_cam: Whether camera frusta are included in the exported scene.
        prediction_mode: Point-source selector forwarded to predictions_to_glb.

    Returns:
        Path of the exported ``.glb`` file.
    """
    
    print(f"Processing saved outputs from: {npz_path}")
    print(f"Output directory: {output_dir}")
    print(f"Confidence threshold: {conf_thres}%")
    
    # Load predictions
    predictions, metadata, image_paths = load_saved_predictions(npz_path)
    
    # Create output directory structure
    os.makedirs(output_dir, exist_ok=True)
    target_dir_images = os.path.join(output_dir, "images")
    os.makedirs(target_dir_images, exist_ok=True)
    
    # Copy original images into output_dir/images and load resized copies
    # into predictions['images'] so the GLB export can color the points.
    if image_paths:
        print(f"Loading {len(image_paths)} images...")
        try:
            from PIL import Image
            images_list = []
            
            # Infer the target image size from the prediction arrays so the
            # resized photos align pixel-for-pixel with depth/world points.
            if 'depth' in predictions:
                depth_shape = predictions['depth'].shape
                if len(depth_shape) == 5:  # (1, S, H, W, 1)
                    target_h, target_w = depth_shape[2], depth_shape[3]
                elif len(depth_shape) == 4:  # (S, H, W, 1) or (1, S, H, W)
                    target_h, target_w = depth_shape[-3], depth_shape[-2]
                else:
                    target_h, target_w = depth_shape[-2], depth_shape[-1]
            elif 'world_points' in predictions:
                # world_points assumed (..., H, W, 3) -- H/W sit before the
                # trailing coordinate axis.
                points_shape = predictions['world_points'].shape
                target_h, target_w = points_shape[-3], points_shape[-2]
            else:
                target_h, target_w = 518, 518  # Default
            
            print(f"  Target image size: {target_h}x{target_w}")
            
            for img_path in image_paths:
                if os.path.exists(img_path):
                    # Copy original image
                    dst_path = os.path.join(target_dir_images, os.path.basename(img_path))
                    shutil.copy(img_path, dst_path)
                    
                    # Load and resize image (PIL size is (width, height))
                    img = Image.open(img_path).convert('RGB')
                    if img.size != (target_w, target_h):
                        img = img.resize((target_w, target_h), Image.BILINEAR)
                    img_array = np.array(img)
                    images_list.append(img_array)
                else:
                    print(f"Warning: Image not found: {img_path}")
            
            if images_list:
                # Stack images: (S, H, W, 3)
                images_array = np.stack(images_list, axis=0)
                predictions['images'] = images_array
                print(f"  Loaded images shape: {images_array.shape}")
        except Exception as e:
            # Best-effort: visualization should still proceed without photos.
            print(f"  Warning: Could not load images: {e}")
            import traceback
            traceback.print_exc()
            # Fall back to black dummy images matching the depth layout.
            if 'depth' in predictions:
                depth_shape = predictions['depth'].shape
                if len(depth_shape) == 5:
                    S, H, W = depth_shape[1], depth_shape[2], depth_shape[3]
                else:
                    S, H, W = depth_shape[0], depth_shape[1], depth_shape[2]
                predictions['images'] = np.zeros((S, H, W, 3), dtype=np.uint8)
                print(f"  Created dummy images: {predictions['images'].shape}")
    else:
        print("Warning: No image paths found in metadata")
    
    # Convert pose encoding to extrinsic and intrinsic if not already present
    if 'extrinsic' not in predictions or 'intrinsic' not in predictions:
        print("Converting pose encoding to camera parameters...")
        pose_enc = predictions['pose_enc']
        
        print(f"  Original pose_enc shape: {pose_enc.shape}")
        
        # pose_enc should be (B, S, 9) or (S, 9)
        # The function expects (B, S, 9), so if it's (S, 9), we need to add batch dim
        if pose_enc.ndim == 2:  # (S, 9)
            pose_enc = pose_enc[np.newaxis, ...]  # (1, S, 9)
            print(f"  Added batch dim: {pose_enc.shape}")
        elif pose_enc.ndim == 3 and pose_enc.shape[0] > 1:
            # Batch dimension (B > 1) is already present; nothing to do.
            pass  # Already correct shape (B, S, 9)
        
        # Infer image shape (H, W) from depth or world_points; the intrinsics
        # produced below depend on it.
        if 'depth' in predictions:
            depth_shape = predictions['depth'].shape
            if len(depth_shape) == 5:  # (1, S, H, W, 1)
                img_shape = (depth_shape[2], depth_shape[3])
            elif len(depth_shape) == 4:
                img_shape = (depth_shape[-3], depth_shape[-2])
            else:
                img_shape = (depth_shape[-2], depth_shape[-1])
        elif 'world_points' in predictions:
            points_shape = predictions['world_points'].shape
            img_shape = (points_shape[-3], points_shape[-2])
        else:
            img_shape = (518, 518)  # Default
        
        # Convert to torch tensor temporarily for conversion
        pose_enc_tensor = torch.from_numpy(pose_enc)
        extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc_tensor, img_shape)
        predictions['extrinsic'] = extrinsic.numpy()
        predictions['intrinsic'] = intrinsic.numpy()
        print(f"  Extrinsic shape: {predictions['extrinsic'].shape}")
        print(f"  Intrinsic shape: {predictions['intrinsic'].shape}")
    else:
        # Ensure extrinsic and intrinsic don't have batch dimension
        if predictions['extrinsic'].ndim == 3 and predictions['extrinsic'].shape[0] == 1:
            predictions['extrinsic'] = predictions['extrinsic'].squeeze(0)
        if predictions['intrinsic'].ndim == 3 and predictions['intrinsic'].shape[0] == 1:
            predictions['intrinsic'] = predictions['intrinsic'].squeeze(0)
    
    # Generate world points from depth if needed (optional, we already have world_points)
    if 'world_points_from_depth' not in predictions and 'depth' in predictions:
        print("Skipping world_points_from_depth computation (using existing world_points)")
        # We already have world_points from the model, no need to recompute
    
    # Save processed predictions (drop the per-item pose_enc_list keys,
    # already folded into pose_enc_list by load_saved_predictions).
    prediction_save_path = os.path.join(output_dir, "predictions.npz")
    save_dict = {k: v for k, v in predictions.items() if not k.startswith('pose_enc_list_item_')}
    np.savez(prediction_save_path, **save_dict)
    print(f"Saved processed predictions to {prediction_save_path}")
    
    # Prepare predictions for GLB export (remove batch dimensions)
    # NOTE(review): this squeezes ANY leading dim of size 1, so a
    # single-frame (S==1) array would lose its frame axis too -- confirm
    # predictions_to_glb tolerates that for one-image scenes.
    glb_predictions = {}
    for key, value in predictions.items():
        if isinstance(value, np.ndarray):
            # Remove batch dimension if present (first dimension = 1)
            if value.ndim > 0 and value.shape[0] == 1:
                glb_predictions[key] = value.squeeze(0)
            else:
                glb_predictions[key] = value
        else:
            glb_predictions[key] = value
    
    print(f"Prepared predictions for GLB export:")
    for key in ['images', 'depth', 'world_points', 'extrinsic', 'intrinsic']:
        if key in glb_predictions and isinstance(glb_predictions[key], np.ndarray):
            print(f"  {key}: {glb_predictions[key].shape}")
    
    # Create GLB; filename encodes the threshold so different settings
    # don't overwrite each other.
    print("Creating 3D model...")
    glbfile = os.path.join(output_dir, f"scene_conf{conf_thres}.glb")
    glbscene = predictions_to_glb(
        glb_predictions,
        conf_thres=conf_thres,
        filter_by_frames="All",
        mask_black_bg=False,
        mask_white_bg=False,
        show_cam=show_cam,
        mask_sky=False,
        target_dir=output_dir,
        prediction_mode=prediction_mode,
    )
    glbscene.export(file_obj=glbfile)
    print(f"Saved 3D model to {glbfile}")
    
    print("\n✅ Processing complete!")
    return glbfile


def create_viewer_html(glb_filename, output_dir, default_conf_thres=50.0):
    """Write a self-contained HTML viewer page for the GLB file.

    The page embeds Google's <model-viewer> web component (loaded from
    unpkg, so viewing needs network access) and talks back to the Flask
    server started by start_server():
      - POST /regenerate to rebuild the GLB at a new confidence threshold
      - GET /depth_meta and /depth_png for the DPT depth panel

    Args:
        glb_filename: Path of the GLB scene; only its basename is embedded,
            so the file must be served from ``output_dir``.
        output_dir: Directory where ``viewer.html`` is written; also
            interpolated into the page's JS as ``outputDir``.
        default_conf_thres: Initial confidence threshold (percent) shown
            by the slider.

    Returns:
        Path of the written ``viewer.html``.
    """
    # NOTE(review): output_dir is interpolated into a single-quoted JS
    # string below without escaping; a path containing quotes or
    # backslashes would break the page. Fine for typical POSIX paths --
    # confirm before using on Windows.
    # All literal braces in the CSS/JS are doubled ({{ }}) because the
    # whole page is one f-string.
    html_content = f"""
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>3D Reconstruction Viewer</title>
    <script type="module" src="https://unpkg.com/@google/model-viewer/dist/model-viewer.min.js"></script>
    <style>
        body {{
            margin: 0;
            padding: 20px;
            font-family: Arial, sans-serif;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            min-height: 100vh;
        }}
        .container {{
            max-width: 1400px;
            margin: 0 auto;
            background: white;
            border-radius: 20px;
            padding: 30px;
            box-shadow: 0 20px 60px rgba(0,0,0,0.3);
        }}
        h1 {{
            text-align: center;
            color: #333;
            margin-bottom: 10px;
        }}
        .info {{
            text-align: center;
            color: #666;
            margin-bottom: 30px;
        }}
        model-viewer {{
            width: 100%;
            height: 700px;
            background-color: #f0f0f0;
            border-radius: 10px;
            box-shadow: inset 0 2px 10px rgba(0,0,0,0.1);
        }}
        .controls {{
            margin-top: 20px;
            text-align: center;
            padding: 20px;
            background: #f8f9fa;
            border-radius: 10px;
        }}
        .depth-panel {{
            margin-top: 20px;
            padding: 20px;
            background: #fff;
            border-radius: 10px;
            box-shadow: 0 10px 25px rgba(0,0,0,0.15);
        }}
        .depth-header {{
            display: flex;
            justify-content: space-between;
            align-items: center;
            margin-bottom: 10px;
        }}
        .depth-controls {{
            display: flex;
            gap: 10px;
            align-items: center;
            flex-wrap: wrap;
        }}
        .depth-img {{
            width: 100%;
            max-height: 600px;
            object-fit: contain;
            background: #111;
            border-radius: 8px;
        }}
        .controls button {{
            margin: 5px;
            padding: 10px 20px;
            font-size: 14px;
            border: none;
            border-radius: 5px;
            cursor: pointer;
            background: #667eea;
            color: white;
            transition: all 0.3s;
        }}
        .controls button:hover {{
            background: #764ba2;
            transform: translateY(-2px);
            box-shadow: 0 5px 15px rgba(0,0,0,0.2);
        }}
        .controls button:disabled {{
            background: #ccc;
            cursor: not-allowed;
            transform: none;
        }}
        .confidence-control {{
            margin: 20px auto;
            padding: 25px;
            background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
            border-radius: 10px;
            max-width: 600px;
        }}
        .confidence-label {{
            font-size: 16px;
            font-weight: bold;
            color: #333;
            margin-bottom: 15px;
            display: flex;
            justify-content: space-between;
            align-items: center;
        }}
        .confidence-value {{
            color: #667eea;
            font-size: 20px;
        }}
        .slider-container {{
            display: flex;
            align-items: center;
            gap: 15px;
        }}
        .slider {{
            flex: 1;
            height: 8px;
            border-radius: 5px;
            background: #ddd;
            outline: none;
            -webkit-appearance: none;
        }}
        .slider::-webkit-slider-thumb {{
            -webkit-appearance: none;
            appearance: none;
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background: #667eea;
            cursor: pointer;
            transition: all 0.3s;
        }}
        .slider::-webkit-slider-thumb:hover {{
            background: #764ba2;
            transform: scale(1.2);
        }}
        .slider::-moz-range-thumb {{
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background: #667eea;
            cursor: pointer;
            border: none;
            transition: all 0.3s;
        }}
        .slider::-moz-range-thumb:hover {{
            background: #764ba2;
            transform: scale(1.2);
        }}
        .preset-buttons {{
            display: flex;
            gap: 10px;
            justify-content: center;
            margin-top: 15px;
        }}
        .preset-buttons button {{
            padding: 8px 16px;
            font-size: 12px;
            border: none;
            border-radius: 5px;
            cursor: pointer;
            background: white;
            color: #667eea;
            border: 2px solid #667eea;
            transition: all 0.3s;
        }}
        .preset-buttons button:hover {{
            background: #667eea;
            color: white;
        }}
        .loading-overlay {{
            position: fixed;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            background: rgba(0, 0, 0, 0.7);
            display: none;
            justify-content: center;
            align-items: center;
            z-index: 9999;
        }}
        .loading-content {{
            background: white;
            padding: 30px;
            border-radius: 10px;
            text-align: center;
        }}
        .spinner {{
            border: 4px solid #f3f3f3;
            border-top: 4px solid #667eea;
            border-radius: 50%;
            width: 40px;
            height: 40px;
            animation: spin 1s linear infinite;
            margin: 0 auto 15px;
        }}
        @keyframes spin {{
            0% {{ transform: rotate(0deg); }}
            100% {{ transform: rotate(360deg); }}
        }}
        .stats {{
            margin-top: 20px;
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 15px;
        }}
        .stat-card {{
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 20px;
            border-radius: 10px;
            text-align: center;
        }}
        .stat-label {{
            font-size: 14px;
            opacity: 0.9;
            margin-bottom: 5px;
        }}
        .stat-value {{
            font-size: 24px;
            font-weight: bold;
        }}
        .footer {{
            margin-top: 20px;
            text-align: center;
            color: #666;
            font-size: 14px;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h1>🏛️ 3D Scene Reconstruction</h1>
        <div class="info">
            Interactive 3D viewer - Use mouse to rotate, zoom, and pan
        </div>
        
        <model-viewer
            src="{os.path.basename(glb_filename)}"
            alt="3D Scene Reconstruction"
            auto-rotate
            camera-controls
            camera-orbit="0deg 75deg 2m"
            shadow-intensity="1"
            exposure="1"
            environment-image="neutral">
            
            <div class="progress-bar hide" slot="progress-bar">
                <div class="update-bar"></div>
            </div>
        </model-viewer>
        
        <div class="confidence-control">
            <div class="confidence-label">
                <span>🎯 Confidence Threshold</span>
                <span class="confidence-value" id="confValue">{default_conf_thres:.1f}%</span>
            </div>
            <div class="slider-container">
                <span style="color: #666;">0%</span>
                <input type="range" min="0" max="100" value="{default_conf_thres}" 
                       class="slider" id="confSlider" step="0.1">
                <span style="color: #666;">100%</span>
            </div>
            <div class="preset-buttons">
                <button onclick="setConfidence(25)">Low (25%)</button>
                <button onclick="setConfidence(50)">Medium (50%)</button>
                <button onclick="setConfidence(75)">High (75%)</button>
                <button onclick="setConfidence(90)">Very High (90%)</button>
            </div>
            <div style="margin-top: 15px;">
                <button onclick="updateModel()" style="padding: 12px 30px; font-size: 16px; 
                        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); 
                        border: none; color: white; border-radius: 5px; cursor: pointer;">
                    🔄 Update Model
                </button>
            </div>
        </div>
        
        <div class="depth-panel">
            <div class="depth-header">
                <h3 style="margin:0;">🖼️ DPT Depth Visualization</h3>
                <div class="depth-controls">
                    <label for="depthFrame">Frame: <span id="depthFrameVal">0</span></label>
                    <input type="range" id="depthFrame" min="0" max="0" value="0" step="1">
                    <select id="colormap">
                        <option value="inferno">inferno</option>
                        <option value="turbo">turbo</option>
                        <option value="magma">magma</option>
                        <option value="viridis">viridis</option>
                        <option value="gray">gray</option>
                    </select>
                    <button onclick="refreshDepth()">Refresh</button>
                </div>
            </div>
            <img id="depthImg" class="depth-img" alt="Depth visualization" />
            <div style="margin-top:8px;color:#666;" id="depthMeta">Frames: -, Size: -</div>
        </div>

        <div class="controls">
            <button onclick="document.querySelector('model-viewer').cameraOrbit = '0deg 75deg 2m'">Reset View</button>
            <button onclick="toggleAutoRotate()">Toggle Auto-Rotate</button>
            <button onclick="downloadModel()">Download Model</button>
        </div>
        
        <div class="loading-overlay" id="loadingOverlay">
            <div class="loading-content">
                <div class="spinner"></div>
                <p style="color: #333; font-size: 16px;">Regenerating 3D model...</p>
                <p style="color: #666; font-size: 14px;">This may take a few moments</p>
            </div>
        </div>
        
        <div class="stats">
            <div class="stat-card">
                <div class="stat-label">Current Model</div>
                <div class="stat-value" id="currentModel">{os.path.basename(glb_filename)}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Output Directory</div>
                <div class="stat-value">{os.path.basename(output_dir)}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Current Confidence</div>
                <div class="stat-value" id="currentConf">{default_conf_thres:.1f}%</div>
            </div>
        </div>
        
        <div class="footer">
            <p>Generated with VGGT Mini - Visual Geometry Grounded Transformer</p>
            <p>🔄 Rotate: Left click + drag | 🔍 Zoom: Scroll | 📐 Pan: Right click + drag</p>
        </div>
    </div>
    
    <script>
        const slider = document.getElementById('confSlider');
        const confValue = document.getElementById('confValue');
        const outputDir = '{output_dir}';
    const depthImg = document.getElementById('depthImg');
    const depthFrame = document.getElementById('depthFrame');
    const depthFrameVal = document.getElementById('depthFrameVal');
    const depthMeta = document.getElementById('depthMeta');
    const colormapSel = document.getElementById('colormap');
        
        // Update displayed value when slider moves
        slider.oninput = function() {{
            confValue.textContent = parseFloat(this.value).toFixed(1) + '%';
        }}
        
        function setConfidence(value) {{
            slider.value = value;
            confValue.textContent = value.toFixed(1) + '%';
        }}
        
        function toggleAutoRotate() {{
            const viewer = document.querySelector('model-viewer');
            viewer.autoRotate = !viewer.autoRotate;
        }}
        
        function downloadModel() {{
            const viewer = document.querySelector('model-viewer');
            const src = viewer.src;
            window.location.href = src;
        }}

        async function loadDepthMeta() {{
            try {{
                const resp = await fetch('/depth_meta');
                if (!resp.ok) throw new Error('depth_meta failed');
                const meta = await resp.json();
                const S = meta.frames || 0;
                depthFrame.max = Math.max(0, S-1);
                depthFrame.value = 0;
                depthFrameVal.textContent = '0';
                depthMeta.textContent = `Frames: ${{S}}, Size: ${{meta.height}}x${{meta.width}}`;
                await refreshDepth();
            }} catch (e) {{
                console.error(e);
                depthMeta.textContent = 'Depth not available';
            }}
        }}

        async function refreshDepth() {{
            const f = parseInt(depthFrame.value);
            depthFrameVal.textContent = f.toString();
            const cm = colormapSel.value;
            const url = `/depth_png?frame=${{f}}&cm=${{cm}}&t=${{Date.now()}}`;
            depthImg.src = url;
        }}

        depthFrame.addEventListener('input', refreshDepth);
        colormapSel.addEventListener('change', refreshDepth);

        // Load depth metadata on startup
        loadDepthMeta();
        
        async function updateModel() {{
            const confThres = parseFloat(slider.value);
            const loadingOverlay = document.getElementById('loadingOverlay');
            const viewer = document.querySelector('model-viewer');
            
            // Show loading overlay
            loadingOverlay.style.display = 'flex';
            
            try {{
                // Call backend API to regenerate model
                const response = await fetch('/regenerate', {{
                    method: 'POST',
                    headers: {{
                        'Content-Type': 'application/json',
                    }},
                    body: JSON.stringify({{
                        conf_thres: confThres,
                        output_dir: outputDir
                    }})
                }});
                
                if (!response.ok) {{
                    throw new Error('Failed to regenerate model');
                }}
                
                const data = await response.json();
                
                // Update model viewer
                const newModelPath = data.glb_file + '?t=' + new Date().getTime(); // Cache busting
                viewer.src = newModelPath;
                
                // Update stats
                document.getElementById('currentModel').textContent = data.glb_filename;
                document.getElementById('currentConf').textContent = confThres.toFixed(1) + '%';
                
                // Hide loading overlay after model loads
                viewer.addEventListener('load', function() {{
                    loadingOverlay.style.display = 'none';
                }}, {{ once: true }});
                
            }} catch (error) {{
                console.error('Error:', error);
                alert('Failed to update model: ' + error.message);
                loadingOverlay.style.display = 'none';
            }}
        }}
    </script>
</body>
</html>
    """
    
    # Write the page next to the GLB so the relative src URL resolves
    # when served from output_dir.
    html_path = os.path.join(output_dir, "viewer.html")
    with open(html_path, 'w') as f:
        f.write(html_content)
    
    print(f"Created viewer HTML: {html_path}")
    return html_path


def start_server(output_dir, input_dir, port=8000, model=None, device=None, 
                 show_cam=True, prediction_mode="Depthmap and Camera Branch"):
    """Start a Flask server that serves the viewer and handles regeneration.

    Routes:
        GET  /             -> viewer.html
        GET  /<path>       -> static files from output_dir
        POST /regenerate   -> rebuild the GLB from predictions.npz with a new
                              confidence threshold (JSON body: {"conf_thres": ...})
        GET  /depth_meta   -> {"frames", "height", "width"} of the saved depth
        GET  /depth_png    -> one depth frame rendered as a colormapped PNG

    Args:
        output_dir: Directory containing viewer.html, predictions.npz and GLBs.
        input_dir: Directory with the source images. Kept for interface
            compatibility; the current routes do not read it.
        port: TCP port to listen on (binds 0.0.0.0).
        model: Unused — regeneration works purely from the saved outputs.
        device: Unused, kept for interface compatibility.
        show_cam: Whether regenerated GLBs include camera markers.
        prediction_mode: Forwarded to predictions_to_glb().

    Blocks the calling thread until Ctrl+C (the Flask thread is a daemon).
    """
    app = Flask(__name__)
    CORS(app)

    # Stash parameters on the app config for introspection; the route
    # closures below capture the function locals directly.
    app.config['output_dir'] = output_dir
    app.config['input_dir'] = input_dir
    app.config['model'] = model
    app.config['device'] = device
    app.config['show_cam'] = show_cam
    app.config['prediction_mode'] = prediction_mode

    def _strip_batch(arr):
        """Drop a leading batch axis of length 1, if present."""
        if arr.ndim > 0 and arr.shape[0] == 1:
            return arr.squeeze(0)
        return arr

    @app.route('/')
    def index():
        return send_from_directory(output_dir, 'viewer.html')

    @app.route('/<path:path>')
    def serve_file(path):
        return send_from_directory(output_dir, path)

    @app.route('/regenerate', methods=['POST'])
    def regenerate():
        """Rebuild the GLB scene at the requested confidence threshold."""
        try:
            data = request.json
            conf_thres = float(data['conf_thres'])

            print(f"\n🔄 Regenerating model with confidence threshold: {conf_thres}%")

            # NOTE(review): the previous version globbed input_dir for image
            # files here on every request but never used the result — removed.

            # Load predictions
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'error': 'Predictions file not found'}), 404

            predictions = dict(np.load(prediction_path, allow_pickle=True))

            # predictions_to_glb expects arrays without the leading batch axis.
            glb_predictions = {
                key: _strip_batch(value) if isinstance(value, np.ndarray) else value
                for key, value in predictions.items()
            }

            # Regenerate GLB with the new confidence threshold.
            glbfile = os.path.join(output_dir, f"scene_conf{conf_thres}.glb")
            glbscene = predictions_to_glb(
                glb_predictions,
                conf_thres=conf_thres,
                filter_by_frames="All",
                mask_black_bg=False,
                mask_white_bg=False,
                show_cam=show_cam,
                mask_sky=False,
                target_dir=output_dir,
                prediction_mode=prediction_mode,
            )
            glbscene.export(file_obj=glbfile)

            print(f"✅ Model regenerated: {glbfile}")

            return jsonify({
                'status': 'success',
                'glb_file': os.path.basename(glbfile),
                'glb_filename': os.path.basename(glbfile),
                'conf_thres': conf_thres
            })

        except Exception as e:
            print(f"❌ Error regenerating model: {e}")
            import traceback
            traceback.print_exc()
            return jsonify({'error': str(e)}), 500

    @app.route('/depth_meta', methods=['GET'])
    def depth_meta():
        """Report frame count and resolution of the saved depth maps.

        Always answers 200 so the viewer UI never breaks; errors are
        reported inside the JSON payload.
        """
        try:
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'frames': 0, 'height': 0, 'width': 0}), 200
            preds = np.load(prediction_path, allow_pickle=True)
            # NpzFile implements the Mapping protocol on modern numpy, so
            # .get() returns None when the key is absent.
            depth = preds.get('depth')
            if depth is None:
                return jsonify({'frames': 0, 'height': 0, 'width': 0}), 200

            depth = _strip_batch(depth)

            if depth.ndim == 2:
                frames, height, width = 1, depth.shape[0], depth.shape[1]
            elif depth.ndim == 3 and depth.shape[-1] == 1:
                # Bug fix: a single frame saved as (H, W, 1) used to fall into
                # the generic 3-D branch and be misread as (frames, H, W).
                frames, height, width = 1, depth.shape[0], depth.shape[1]
            elif depth.ndim == 3:
                frames, height, width = depth.shape[0], depth.shape[1], depth.shape[2]
            elif depth.ndim == 4 and depth.shape[-1] == 1:
                frames, height, width = depth.shape[0], depth.shape[1], depth.shape[2]
            else:
                # Unknown layout — best-effort guess from the outer axes.
                frames = int(depth.shape[0]) if depth.shape[0] > 0 else 0
                height = int(depth.shape[-2])
                width = int(depth.shape[-1])
            return jsonify({'frames': int(frames), 'height': int(height), 'width': int(width)}), 200
        except Exception as e:
            return jsonify({'frames': 0, 'height': 0, 'width': 0, 'error': str(e)}), 200

    def _colormap(depth2d: np.ndarray, cm_name: str = 'inferno') -> np.ndarray:
        """Map an (H, W) depth array to uint8 RGB via a matplotlib colormap.

        Normalization statistics are computed over positive depths only
        (non-positive values mark invalid pixels). Falls back to grayscale
        when matplotlib (or the requested colormap) is unavailable.
        """
        valid = depth2d > 0
        if valid.any():
            mn = float(depth2d[valid].min())
            mx = float(depth2d[valid].max())
        else:
            mn, mx = 0.0, 1.0
        denom = max(mx - mn, 1e-6)
        norm = np.clip((depth2d - mn) / denom, 0.0, 1.0)
        # NOTE(review): attribute access on matplotlib.cm is deprecated in
        # newer matplotlib; the hasattr() guard keeps this safe (gray fallback).
        if mpl_cm is not None and hasattr(mpl_cm, cm_name):
            cmap = getattr(mpl_cm, cm_name)
            rgb = (cmap(norm)[..., :3] * 255).astype(np.uint8)
        else:
            gray = (norm * 255).astype(np.uint8)
            rgb = np.stack([gray, gray, gray], axis=-1)
        return rgb

    @app.route('/depth_png', methods=['GET'])
    def depth_png():
        """Render one saved depth frame as a colormapped PNG.

        Query params: frame (int, clamped to range), cm (colormap name).
        """
        try:
            frame = int(request.args.get('frame', 0))
            cm_name = request.args.get('cm', 'inferno')
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'error': 'predictions.npz not found'}), 404
            preds = np.load(prediction_path, allow_pickle=True)
            depth = preds.get('depth')
            if depth is None:
                return jsonify({'error': 'depth not found in predictions'}), 404

            depth = _strip_batch(depth)

            if depth.ndim == 2:
                depth2d = depth
            elif depth.ndim == 3 and depth.shape[-1] == 1:
                # Bug fix: single frame saved as (H, W, 1) — previously this
                # was indexed as (frames, H, W) and returned a wrong slice.
                depth2d = depth[..., 0]
            elif depth.ndim == 3:
                frame = max(0, min(depth.shape[0] - 1, frame))
                depth2d = depth[frame]
            elif depth.ndim == 4 and depth.shape[-1] == 1:
                frame = max(0, min(depth.shape[0] - 1, frame))
                depth2d = depth[frame, ..., 0]
            else:
                return jsonify({'error': f'unsupported depth shape {depth.shape}'}), 400

            rgb = _colormap(depth2d, cm_name)
            if Image is None:
                # No PIL available: ship the raw pixels as a PPM blob so the
                # endpoint still responds (browsers will not render it).
                buf = BytesIO()
                header = f"P6\n{rgb.shape[1]} {rgb.shape[0]}\n255\n".encode('ascii')
                buf.write(header)
                buf.write(rgb.tobytes())
                buf.seek(0)
                return send_file(buf, mimetype='application/octet-stream')

            img = Image.fromarray(rgb)
            out = BytesIO()
            img.save(out, format='PNG')
            out.seek(0)
            return send_file(out, mimetype='image/png')
        except Exception as e:
            return jsonify({'error': str(e)}), 500

    # Run Flask in a daemon thread so the main thread can open the browser
    # and wait for Ctrl+C.
    def run_server():
        print(f"\n🌐 Server started at http://localhost:{port}")
        print(f"📂 Serving files from: {output_dir}")
        print(f"\n✨ Opening browser... (Press Ctrl+C to stop the server)")
        app.run(host='0.0.0.0', port=port, debug=False, use_reloader=False)

    server_thread = threading.Thread(target=run_server, daemon=True)
    server_thread.start()

    # Open the viewer; /viewer.html is served by the catch-all route.
    webbrowser.open(f"http://localhost:{port}/viewer.html")

    # Keep the main thread alive until interrupted.
    try:
        server_thread.join()
    except KeyboardInterrupt:
        print("\n\n👋 Server stopped")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Visualize saved VGGT outputs from npz files")
    parser.add_argument("--npz-path", required=True,
                        help="Path to the saved outputs.npz file")
    parser.add_argument("--output-dir", default=None,
                        help="Output directory for visualization (default: same dir as npz file)")
    parser.add_argument("--conf-thres", type=float, default=50.0,
                        help="Confidence threshold percentage (0-100)")
    # Bug fix: `--show-cam` was store_true with default=True, so the flag was
    # a no-op and cameras could never be hidden. `--show-cam` is kept for
    # backward compatibility; `--no-show-cam` actually disables it.
    parser.add_argument("--show-cam", dest="show_cam", action="store_true", default=True,
                        help="Show camera positions in 3D model (default)")
    parser.add_argument("--no-show-cam", dest="show_cam", action="store_false",
                        help="Hide camera positions in the 3D model")
    parser.add_argument("--prediction-mode", default="Pointmap Branch",
                        choices=["Depthmap and Camera Branch", "Pointmap Branch"],
                        help="Prediction mode to use")
    parser.add_argument("--visualize", action="store_true", default=False,
                        help="Start web server to visualize the 3D model")
    parser.add_argument("--port", type=int, default=8000,
                        help="Port for web server (default: 8000)")

    args = parser.parse_args()

    # Validate npz path before doing any work.
    if not os.path.exists(args.npz_path):
        print(f"❌ Error: NPZ file not found: {args.npz_path}")
        sys.exit(1)

    # Default output directory: timestamped folder next to the npz file.
    if args.output_dir is None:
        npz_dir = os.path.dirname(args.npz_path)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        args.output_dir = os.path.join(npz_dir, f"visualization_{timestamp}")

    try:
        # Build the GLB scene from the saved predictions.
        glbfile = process_saved_outputs(
            args.npz_path,
            args.output_dir,
            args.conf_thres,
            args.show_cam,
            args.prediction_mode
        )

        # Create the HTML viewer seeded with the default confidence threshold.
        html_path = create_viewer_html(glbfile, args.output_dir, args.conf_thres)

        print(f"\n📁 Output files:")
        print(f"   - 3D Model: {glbfile}")
        print(f"   - Viewer HTML: {html_path}")
        print(f"   - Predictions: {os.path.join(args.output_dir, 'predictions.npz')}")

        # Start the interactive visualization server if requested.
        if args.visualize:
            # Recover the original image directory from metadata, if present.
            metadata_path = os.path.join(os.path.dirname(args.npz_path), "metadata.json")
            image_dir = None
            if os.path.exists(metadata_path):
                with open(metadata_path, 'r') as f:
                    metadata = json.load(f)
                    image_paths = metadata.get('image_paths', [])
                    if image_paths:
                        image_dir = os.path.dirname(image_paths[0])

            start_server(
                args.output_dir, 
                image_dir if image_dir else args.output_dir,
                args.port,
                None,  # No model needed for regeneration from saved outputs
                None,
                args.show_cam,
                args.prediction_mode
            )
        else:
            print(f"\n💡 To visualize the model with adjustable confidence, run:")
            print(f"   python {__file__} --npz-path {args.npz_path} --visualize --output-dir {args.output_dir}")
            print(f"\n   Or open manually: {html_path}")

    except Exception as e:
        print(f"\n❌ Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)