#!/usr/bin/env python3
"""
Direct reconstruction script - process images from a specified directory without Gradio UI.

Usage:
    python /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/direct_reconstruct.py \
        --visualize \
        --port 8000
"""

import os
import sys
import glob
import torch
import numpy as np
import shutil
import argparse
import webbrowser
import threading
from pathlib import Path
from datetime import datetime
from flask import Flask, request, jsonify, send_from_directory, send_file
from flask_cors import CORS
from io import BytesIO

# Add paths
SCRIPT_DIR = Path(__file__).resolve().parent
VGGT_ROOT = SCRIPT_DIR.parents[1]
if str(VGGT_ROOT) not in sys.path:
    sys.path.insert(0, str(VGGT_ROOT))

from vggt.utils.load_fn import load_and_preprocess_images
from vggt.utils.pose_enc import pose_encoding_to_extri_intri
from vggt.utils.geometry import unproject_depth_map_to_point_map
from mini_vggt.Distill.vggt_mini import MiniVGGT, MiniVGGTConfig
from vggt.heads.dpt_head import DPTHead
from vggt.heads.camera_head import CameraHead
import torch.nn as nn
from visual_util import predictions_to_glb
try:
    from PIL import Image  # type: ignore
except Exception:
    Image = None
try:
    import matplotlib.cm as mpl_cm  # type: ignore
except Exception:
    mpl_cm = None


class HybridVGGT(torch.nn.Module):
    """Hybrid model combining the MiniVGGT backbone with the original VGGT heads.

    Tokens from the distilled backbone (dimension ``embed_dim * 2``) are lifted
    by per-layer linear projections into the 2048-dim token space the original
    VGGT camera/point/depth heads were trained on.
    """

    def __init__(self, mini_vggt: MiniVGGT, projection_layers: "nn.ModuleDict | None" = None):
        """
        Args:
            mini_vggt: Distilled backbone exposing ``forward_features``.
            projection_layers: Optional pre-trained student->teacher projections,
                keyed ``layer_0`` .. ``layer_3``. Freshly initialized Linear
                layers are created when omitted.
        """
        super().__init__()
        self.backbone = mini_vggt

        # Student tokens concatenate two embed_dim streams; the teacher (VGGT)
        # heads expect 2048-dim tokens.
        student_dim = mini_vggt.config.embed_dim * 2
        teacher_dim = 2048

        if projection_layers is not None:
            self.projection_layers = projection_layers
        else:
            self.projection_layers = nn.ModuleDict({
                f"layer_{i}": nn.Linear(student_dim, teacher_dim)
                for i in range(4)
            })

        self.camera_head = CameraHead(dim_in=teacher_dim)
        self.point_head = DPTHead(
            dim_in=teacher_dim,
            output_dim=4,
            activation="inv_log",
            conf_activation="expp1",
            intermediate_layer_idx=[0, 1, 2, 3],
        )
        self.depth_head = DPTHead(
            dim_in=teacher_dim,
            output_dim=2,
            activation="exp",
            conf_activation="expp1",
            intermediate_layer_idx=[0, 1, 2, 3],
        )

    def forward(self, images: torch.Tensor) -> dict:
        """Run the backbone and all prediction heads.

        Args:
            images: Image tensor; a 4-D input (no batch dim) is automatically
                promoted to 5-D by prepending a batch dimension of 1.

        Returns:
            dict with keys ``pose_enc``, ``pose_enc_list``, ``world_points``,
            ``world_points_conf``, ``depth``, ``depth_conf`` and ``images``
            (the batched input).
        """
        if images.dim() == 4:
            # Promote (S, C, H, W) -> (1, S, C, H, W) for the backbone/heads.
            images = images.unsqueeze(0)

        aggregated_tokens_list, patch_start_idx = self.backbone.forward_features(images)

        # Project each intermediate token map into the teacher's channel space;
        # layers without a trained projection pass through unchanged.
        projected_tokens_list = []
        for i, tokens in enumerate(aggregated_tokens_list):
            layer_name = f"layer_{i}"
            if layer_name in self.projection_layers:
                B, S, P, C = tokens.shape
                tokens_flat = tokens.reshape(-1, C)
                projected = self.projection_layers[layer_name](tokens_flat)
                projected = projected.reshape(B, S, P, -1)
                projected_tokens_list.append(projected)
            else:
                projected_tokens_list.append(tokens)

        predictions = {}
        pose_enc_list = self.camera_head(projected_tokens_list)
        predictions["pose_enc"] = pose_enc_list[-1]  # final refinement iteration
        predictions["pose_enc_list"] = pose_enc_list

        point_map, point_conf = self.point_head(projected_tokens_list, images, patch_start_idx)
        predictions["world_points"] = point_map
        predictions["world_points_conf"] = point_conf

        depth_map, depth_conf = self.depth_head(projected_tokens_list, images, patch_start_idx)
        predictions["depth"] = depth_map
        predictions["depth_conf"] = depth_conf
        predictions["images"] = images

        return predictions


def load_model(
    device,
    distilled_model_path="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_finetuned.pt",
    original_model_path="/data0/liqifeng/ZYC/model.pt",
):
    """Assemble the hybrid model from a distilled checkpoint plus VGGT heads.

    Args:
        device: Device (string or torch.device) the assembled model is moved to.
        distilled_model_path: Checkpoint holding the MiniVGGT backbone weights
            (optionally nested under "model") and projector weights under
            "projector".
        original_model_path: Full (teacher) VGGT checkpoint; only camera_head /
            point_head / depth_head weights are taken from it.

    Returns:
        A ``HybridVGGT`` instance in eval mode on ``device``.
    """
    print("Loading model...")

    # --- Backbone --------------------------------------------------------
    mini_vggt = MiniVGGT(MiniVGGTConfig())

    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(distilled_model_path, map_location="cpu")

    if isinstance(checkpoint, dict) and "model" in checkpoint:
        mini_state_dict = checkpoint["model"]
    else:
        mini_state_dict = checkpoint

    mini_vggt.load_state_dict(mini_state_dict, strict=False)
    print("Loaded MiniVGGT backbone")

    # --- Projection layers (student -> teacher token space) --------------
    projection_layers = nn.ModuleDict()
    if isinstance(checkpoint, dict) and "projector" in checkpoint:
        projector_state_dict = checkpoint["projector"]

        student_dim = 768 * 2  # must match MiniVGGTConfig.embed_dim * 2
        teacher_dim = 2048     # channel width expected by the VGGT heads

        for i in range(4):
            weight_key = f"projections.{i}.weight"
            bias_key = f"projections.{i}.bias"

            # Only install a projection when both weight and bias are present.
            if weight_key in projector_state_dict and bias_key in projector_state_dict:
                proj_layer = nn.Linear(student_dim, teacher_dim)
                proj_layer.weight.data = projector_state_dict[weight_key]
                proj_layer.bias.data = projector_state_dict[bias_key]
                projection_layers[f"layer_{i}"] = proj_layer
        print(f"Loaded {len(projection_layers)} projection layers from checkpoint")

    model = HybridVGGT(mini_vggt, projection_layers)

    # --- Prediction heads from the original (teacher) checkpoint ---------
    original_checkpoint = torch.load(original_model_path, map_location="cpu")
    original_state_dict = original_checkpoint.get("model", original_checkpoint)

    head_state_dict = {
        key: value
        for key, value in original_state_dict.items()
        if "point_head" in key or "depth_head" in key or "camera_head" in key
    }

    # strict=False: head keys are a subset of the hybrid model's parameters.
    model.load_state_dict(head_state_dict, strict=False)
    print("Loaded prediction heads")

    model.eval()
    model = model.to(device)
    return model


def _find_input_images(input_dir):
    """Return a sorted, de-duplicated list of image files in ``input_dir``."""
    image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp']
    image_paths = []
    for ext in image_extensions:
        image_paths.extend(glob.glob(os.path.join(input_dir, f"*{ext}")))
        image_paths.extend(glob.glob(os.path.join(input_dir, f"*{ext.upper()}")))
    # On case-insensitive filesystems the lower- and upper-case globs match
    # the same files; de-duplicate so no frame is processed twice.
    return sorted(set(image_paths))


def reconstruct(input_dir, output_dir, conf_thres=50.0, show_cam=True, prediction_mode="Depthmap and Camera Branch"):
    """Run the full reconstruction pipeline on a directory of images.

    Loads the hybrid model, runs inference on all images found in
    ``input_dir``, saves predictions (``predictions.npz``) and an exported
    GLB scene into ``output_dir``.

    Args:
        input_dir: Directory containing the source images.
        output_dir: Directory where images are copied and results written.
        conf_thres: Confidence percentile threshold for point filtering.
        show_cam: Whether to render camera frusta in the GLB scene.
        prediction_mode: Point-source mode passed to ``predictions_to_glb``.

    Returns:
        Path to the exported ``.glb`` file.

    Raises:
        ValueError: If CUDA is unavailable or no images are found.
    """
    # CUDA is mandatory for this pipeline; fail fast before any disk work.
    if not torch.cuda.is_available():
        raise ValueError("CUDA is not available")
    device = "cuda"

    print(f"Input directory: {input_dir}")
    print(f"Output directory: {output_dir}")
    print(f"Confidence threshold: {conf_thres}%")

    image_paths = _find_input_images(input_dir)
    if len(image_paths) == 0:
        raise ValueError(f"No images found in {input_dir}")
    print(f"Found {len(image_paths)} images")

    # Create output directory structure and copy the inputs alongside results.
    os.makedirs(output_dir, exist_ok=True)
    target_dir_images = os.path.join(output_dir, "images")
    os.makedirs(target_dir_images, exist_ok=True)
    for img_path in image_paths:
        dst_path = os.path.join(target_dir_images, os.path.basename(img_path))
        shutil.copy(img_path, dst_path)

    # Load model
    model = load_model(device)

    # Load and preprocess images
    print("Preprocessing images...")
    images = load_and_preprocess_images(image_paths).to(device)
    print(f"Preprocessed images shape: {images.shape}")

    # Run inference under autocast; bf16 needs compute capability >= 8 (Ampere).
    print("Running inference...")
    dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16

    with torch.no_grad():
        with torch.cuda.amp.autocast(dtype=dtype):
            predictions = model(images)

    # Convert pose encoding into explicit camera matrices.
    print("Converting pose encoding...")
    extrinsic, intrinsic = pose_encoding_to_extri_intri(predictions["pose_enc"], images.shape[-2:])
    predictions["extrinsic"] = extrinsic
    predictions["intrinsic"] = intrinsic

    # Move everything to numpy (dropping the batch dim) for saving/export.
    for key in predictions.keys():
        if isinstance(predictions[key], torch.Tensor):
            tensor = predictions[key]
            if tensor.dtype in [torch.bfloat16, torch.float16]:
                # numpy cannot represent bf16/fp16 losslessly here; upcast.
                tensor = tensor.float()
            predictions[key] = tensor.cpu().numpy().squeeze(0)
    # The per-iteration pose list is not needed downstream.
    predictions['pose_enc_list'] = None

    # Generate world points by unprojecting the depth maps.
    print("Computing world points...")
    depth_map = predictions["depth"]
    world_points = unproject_depth_map_to_point_map(depth_map, predictions["extrinsic"], predictions["intrinsic"])
    predictions["world_points_from_depth"] = world_points

    # Save predictions so the viewer server can regenerate GLBs later.
    prediction_save_path = os.path.join(output_dir, "predictions.npz")
    np.savez(prediction_save_path, **predictions)
    print(f"Saved predictions to {prediction_save_path}")

    # Export the point cloud / cameras as a GLB scene.
    print("Creating 3D model...")
    glbfile = os.path.join(output_dir, f"scene_conf{conf_thres}.glb")
    glbscene = predictions_to_glb(
        predictions,
        conf_thres=conf_thres,
        filter_by_frames="All",
        mask_black_bg=False,
        mask_white_bg=False,
        show_cam=show_cam,
        mask_sky=False,
        target_dir=output_dir,
        prediction_mode=prediction_mode,
    )
    glbscene.export(file_obj=glbfile)
    print(f"Saved 3D model to {glbfile}")

    print("\n✅ Reconstruction complete!")
    return glbfile


def create_viewer_html(glb_filename, output_dir, default_conf_thres=50.0):
    """Write an interactive model-viewer HTML page into ``output_dir``.

    The page embeds the GLB (by basename, served from the same directory),
    a confidence-threshold slider that POSTs to ``/regenerate``, and a depth
    visualization panel backed by ``/depth_meta`` / ``/depth_png``.

    Args:
        glb_filename: Path to the GLB file; only its basename is embedded.
        output_dir: Directory the viewer.html is written to (and shown in stats).
        default_conf_thres: Initial slider value, in percent.

    Returns:
        Path to the written ``viewer.html``.
    """
    html_content = f"""
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>3D Reconstruction Viewer</title>
    <script type="module" src="https://unpkg.com/@google/model-viewer/dist/model-viewer.min.js"></script>
    <style>
        body {{
            margin: 0;
            padding: 20px;
            font-family: Arial, sans-serif;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            min-height: 100vh;
        }}
        .container {{
            max-width: 1400px;
            margin: 0 auto;
            background: white;
            border-radius: 20px;
            padding: 30px;
            box-shadow: 0 20px 60px rgba(0,0,0,0.3);
        }}
        h1 {{
            text-align: center;
            color: #333;
            margin-bottom: 10px;
        }}
        .info {{
            text-align: center;
            color: #666;
            margin-bottom: 30px;
        }}
        model-viewer {{
            width: 100%;
            height: 700px;
            background-color: #f0f0f0;
            border-radius: 10px;
            box-shadow: inset 0 2px 10px rgba(0,0,0,0.1);
        }}
        .controls {{
            margin-top: 20px;
            text-align: center;
            padding: 20px;
            background: #f8f9fa;
            border-radius: 10px;
        }}
        .depth-panel {{
            margin-top: 20px;
            padding: 20px;
            background: #fff;
            border-radius: 10px;
            box-shadow: 0 10px 25px rgba(0,0,0,0.15);
        }}
        .depth-header {{
            display: flex;
            justify-content: space-between;
            align-items: center;
            margin-bottom: 10px;
        }}
        .depth-controls {{
            display: flex;
            gap: 10px;
            align-items: center;
            flex-wrap: wrap;
        }}
        .depth-img {{
            width: 100%;
            max-height: 600px;
            object-fit: contain;
            background: #111;
            border-radius: 8px;
        }}
        .controls button {{
            margin: 5px;
            padding: 10px 20px;
            font-size: 14px;
            border: none;
            border-radius: 5px;
            cursor: pointer;
            background: #667eea;
            color: white;
            transition: all 0.3s;
        }}
        .controls button:hover {{
            background: #764ba2;
            transform: translateY(-2px);
            box-shadow: 0 5px 15px rgba(0,0,0,0.2);
        }}
        .controls button:disabled {{
            background: #ccc;
            cursor: not-allowed;
            transform: none;
        }}
        .confidence-control {{
            margin: 20px auto;
            padding: 25px;
            background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
            border-radius: 10px;
            max-width: 600px;
        }}
        .confidence-label {{
            font-size: 16px;
            font-weight: bold;
            color: #333;
            margin-bottom: 15px;
            display: flex;
            justify-content: space-between;
            align-items: center;
        }}
        .confidence-value {{
            color: #667eea;
            font-size: 20px;
        }}
        .slider-container {{
            display: flex;
            align-items: center;
            gap: 15px;
        }}
        .slider {{
            flex: 1;
            height: 8px;
            border-radius: 5px;
            background: #ddd;
            outline: none;
            -webkit-appearance: none;
        }}
        .slider::-webkit-slider-thumb {{
            -webkit-appearance: none;
            appearance: none;
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background: #667eea;
            cursor: pointer;
            transition: all 0.3s;
        }}
        .slider::-webkit-slider-thumb:hover {{
            background: #764ba2;
            transform: scale(1.2);
        }}
        .slider::-moz-range-thumb {{
            width: 20px;
            height: 20px;
            border-radius: 50%;
            background: #667eea;
            cursor: pointer;
            border: none;
            transition: all 0.3s;
        }}
        .slider::-moz-range-thumb:hover {{
            background: #764ba2;
            transform: scale(1.2);
        }}
        .preset-buttons {{
            display: flex;
            gap: 10px;
            justify-content: center;
            margin-top: 15px;
        }}
        .preset-buttons button {{
            padding: 8px 16px;
            font-size: 12px;
            border: none;
            border-radius: 5px;
            cursor: pointer;
            background: white;
            color: #667eea;
            border: 2px solid #667eea;
            transition: all 0.3s;
        }}
        .preset-buttons button:hover {{
            background: #667eea;
            color: white;
        }}
        .loading-overlay {{
            position: fixed;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            background: rgba(0, 0, 0, 0.7);
            display: none;
            justify-content: center;
            align-items: center;
            z-index: 9999;
        }}
        .loading-content {{
            background: white;
            padding: 30px;
            border-radius: 10px;
            text-align: center;
        }}
        .spinner {{
            border: 4px solid #f3f3f3;
            border-top: 4px solid #667eea;
            border-radius: 50%;
            width: 40px;
            height: 40px;
            animation: spin 1s linear infinite;
            margin: 0 auto 15px;
        }}
        @keyframes spin {{
            0% {{ transform: rotate(0deg); }}
            100% {{ transform: rotate(360deg); }}
        }}
        .stats {{
            margin-top: 20px;
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 15px;
        }}
        .stat-card {{
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 20px;
            border-radius: 10px;
            text-align: center;
        }}
        .stat-label {{
            font-size: 14px;
            opacity: 0.9;
            margin-bottom: 5px;
        }}
        .stat-value {{
            font-size: 24px;
            font-weight: bold;
        }}
        .footer {{
            margin-top: 20px;
            text-align: center;
            color: #666;
            font-size: 14px;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h1>🏛️ 3D Scene Reconstruction</h1>
        <div class="info">
            Interactive 3D viewer - Use mouse to rotate, zoom, and pan
        </div>
        
        <model-viewer
            src="{os.path.basename(glb_filename)}"
            alt="3D Scene Reconstruction"
            auto-rotate
            camera-controls
            camera-orbit="0deg 75deg 2m"
            shadow-intensity="1"
            exposure="1"
            environment-image="neutral">
            
            <div class="progress-bar hide" slot="progress-bar">
                <div class="update-bar"></div>
            </div>
        </model-viewer>
        
        <div class="confidence-control">
            <div class="confidence-label">
                <span>🎯 Confidence Threshold</span>
                <span class="confidence-value" id="confValue">{default_conf_thres:.1f}%</span>
            </div>
            <div class="slider-container">
                <span style="color: #666;">0%</span>
                <input type="range" min="0" max="100" value="{default_conf_thres}" 
                       class="slider" id="confSlider" step="0.1">
                <span style="color: #666;">100%</span>
            </div>
            <div class="preset-buttons">
                <button onclick="setConfidence(25)">Low (25%)</button>
                <button onclick="setConfidence(50)">Medium (50%)</button>
                <button onclick="setConfidence(75)">High (75%)</button>
                <button onclick="setConfidence(90)">Very High (90%)</button>
            </div>
            <div style="margin-top: 15px;">
                <button onclick="updateModel()" style="padding: 12px 30px; font-size: 16px; 
                        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); 
                        border: none; color: white; border-radius: 5px; cursor: pointer;">
                    🔄 Update Model
                </button>
            </div>
        </div>
        
        <div class="depth-panel">
            <div class="depth-header">
                <h3 style="margin:0;">🖼️ DPT Depth Visualization</h3>
                <div class="depth-controls">
                    <label for="depthFrame">Frame: <span id="depthFrameVal">0</span></label>
                    <input type="range" id="depthFrame" min="0" max="0" value="0" step="1">
                    <select id="colormap">
                        <option value="inferno">inferno</option>
                        <option value="turbo">turbo</option>
                        <option value="magma">magma</option>
                        <option value="viridis">viridis</option>
                        <option value="gray">gray</option>
                    </select>
                    <button onclick="refreshDepth()">Refresh</button>
                </div>
            </div>
            <img id="depthImg" class="depth-img" alt="Depth visualization" />
            <div style="margin-top:8px;color:#666;" id="depthMeta">Frames: -, Size: -</div>
        </div>

        <div class="controls">
            <button onclick="document.querySelector('model-viewer').cameraOrbit = '0deg 75deg 2m'">Reset View</button>
            <button onclick="toggleAutoRotate()">Toggle Auto-Rotate</button>
            <button onclick="downloadModel()">Download Model</button>
        </div>
        
        <div class="loading-overlay" id="loadingOverlay">
            <div class="loading-content">
                <div class="spinner"></div>
                <p style="color: #333; font-size: 16px;">Regenerating 3D model...</p>
                <p style="color: #666; font-size: 14px;">This may take a few moments</p>
            </div>
        </div>
        
        <div class="stats">
            <div class="stat-card">
                <div class="stat-label">Current Model</div>
                <div class="stat-value" id="currentModel">{os.path.basename(glb_filename)}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Output Directory</div>
                <div class="stat-value">{os.path.basename(output_dir)}</div>
            </div>
            <div class="stat-card">
                <div class="stat-label">Current Confidence</div>
                <div class="stat-value" id="currentConf">{default_conf_thres:.1f}%</div>
            </div>
        </div>
        
        <div class="footer">
            <p>Generated with VGGT Mini - Visual Geometry Grounded Transformer</p>
            <p>🔄 Rotate: Left click + drag | 🔍 Zoom: Scroll | 📐 Pan: Right click + drag</p>
        </div>
    </div>
    
    <script>
        const slider = document.getElementById('confSlider');
        const confValue = document.getElementById('confValue');
        const outputDir = '{output_dir}';
    const depthImg = document.getElementById('depthImg');
    const depthFrame = document.getElementById('depthFrame');
    const depthFrameVal = document.getElementById('depthFrameVal');
    const depthMeta = document.getElementById('depthMeta');
    const colormapSel = document.getElementById('colormap');
        
        // Update displayed value when slider moves
        slider.oninput = function() {{
            confValue.textContent = parseFloat(this.value).toFixed(1) + '%';
        }}
        
        function setConfidence(value) {{
            slider.value = value;
            confValue.textContent = value.toFixed(1) + '%';
        }}
        
        function toggleAutoRotate() {{
            const viewer = document.querySelector('model-viewer');
            viewer.autoRotate = !viewer.autoRotate;
        }}
        
        function downloadModel() {{
            const viewer = document.querySelector('model-viewer');
            const src = viewer.src;
            window.location.href = src;
        }}

        async function loadDepthMeta() {{
            try {{
                const resp = await fetch('/depth_meta');
                if (!resp.ok) throw new Error('depth_meta failed');
                const meta = await resp.json();
                const S = meta.frames || 0;
                depthFrame.max = Math.max(0, S-1);
                depthFrame.value = 0;
                depthFrameVal.textContent = '0';
                depthMeta.textContent = `Frames: ${{S}}, Size: ${{meta.height}}x${{meta.width}}`;
                await refreshDepth();
            }} catch (e) {{
                console.error(e);
                depthMeta.textContent = 'Depth not available';
            }}
        }}

        async function refreshDepth() {{
            const f = parseInt(depthFrame.value);
            depthFrameVal.textContent = f.toString();
            const cm = colormapSel.value;
            const url = `/depth_png?frame=${{f}}&cm=${{cm}}&t=${{Date.now()}}`;
            depthImg.src = url;
        }}

        depthFrame.addEventListener('input', refreshDepth);
        colormapSel.addEventListener('change', refreshDepth);

        // Load depth metadata on startup
        loadDepthMeta();
        
        async function updateModel() {{
            const confThres = parseFloat(slider.value);
            const loadingOverlay = document.getElementById('loadingOverlay');
            const viewer = document.querySelector('model-viewer');
            
            // Show loading overlay
            loadingOverlay.style.display = 'flex';
            
            try {{
                // Call backend API to regenerate model
                const response = await fetch('/regenerate', {{
                    method: 'POST',
                    headers: {{
                        'Content-Type': 'application/json',
                    }},
                    body: JSON.stringify({{
                        conf_thres: confThres,
                        output_dir: outputDir
                    }})
                }});
                
                if (!response.ok) {{
                    throw new Error('Failed to regenerate model');
                }}
                
                const data = await response.json();
                
                // Update model viewer
                const newModelPath = data.glb_file + '?t=' + new Date().getTime(); // Cache busting
                viewer.src = newModelPath;
                
                // Update stats
                document.getElementById('currentModel').textContent = data.glb_filename;
                document.getElementById('currentConf').textContent = confThres.toFixed(1) + '%';
                
                // Hide loading overlay after model loads
                viewer.addEventListener('load', function() {{
                    loadingOverlay.style.display = 'none';
                }}, {{ once: true }});
                
            }} catch (error) {{
                console.error('Error:', error);
                alert('Failed to update model: ' + error.message);
                loadingOverlay.style.display = 'none';
            }}
        }}
    </script>
</body>
</html>
    """
    
    html_path = os.path.join(output_dir, "viewer.html")
    # The page contains emoji — write explicitly as UTF-8 so this does not
    # depend on (and fail under) a non-UTF-8 locale default encoding.
    with open(html_path, 'w', encoding='utf-8') as f:
        f.write(html_content)
    
    print(f"Created viewer HTML: {html_path}")
    return html_path


def start_server(output_dir, input_dir, port=8000, model=None, device=None, 
                 show_cam=True, prediction_mode="Depthmap and Camera Branch"):
    """Start a Flask server to serve the viewer and handle regeneration requests.

    Args:
        output_dir: Directory holding viewer.html, predictions.npz and GLB files.
        input_dir: Directory with the original input images (stored for reference).
        port: TCP port the server listens on.
        model: Loaded model instance; stashed in app.config for potential
            re-inference, though /regenerate currently only re-filters the
            cached predictions.
        device: Device string associated with `model`.
        show_cam: Whether regenerated GLB scenes include camera frusta.
        prediction_mode: Branch passed through to predictions_to_glb.

    Blocks the calling thread until Ctrl+C; Flask runs in a daemon thread.
    """
    
    app = Flask(__name__)
    CORS(app)  # the viewer page fetches JSON endpoints; allow cross-origin calls
    
    # Store regeneration parameters so routes (and future extensions) can read them.
    app.config['output_dir'] = output_dir
    app.config['input_dir'] = input_dir
    app.config['model'] = model
    app.config['device'] = device
    app.config['show_cam'] = show_cam
    app.config['prediction_mode'] = prediction_mode
    
    @app.route('/')
    def index():
        return send_from_directory(output_dir, 'viewer.html')
    
    @app.route('/<path:path>')
    def serve_file(path):
        # Serves GLB models and any other assets placed in the output directory.
        return send_from_directory(output_dir, path)
    
    @app.route('/regenerate', methods=['POST'])
    def regenerate():
        """Re-export the GLB scene at a new confidence threshold.

        Re-filters the cached predictions.npz — no model inference is run —
        and writes a new scene_conf<thres>.glb next to it.
        """
        try:
            data = request.json
            conf_thres = float(data['conf_thres'])
            
            print(f"\n🔄 Regenerating model with confidence threshold: {conf_thres}%")
            
            # Load cached predictions. (The previous version also globbed the
            # input image paths here, but they were never used — removed.)
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'error': 'Predictions file not found'}), 404
            
            # Use a context manager so the npz file handle is closed promptly;
            # dict() materializes every array before the file closes.
            with np.load(prediction_path, allow_pickle=True) as npz:
                predictions = dict(npz)
            
            # Regenerate GLB with new confidence threshold
            glbfile = os.path.join(output_dir, f"scene_conf{conf_thres}.glb")
            glbscene = predictions_to_glb(
                predictions,
                conf_thres=conf_thres,
                filter_by_frames="All",
                mask_black_bg=False,
                mask_white_bg=False,
                show_cam=show_cam,
                mask_sky=False,
                target_dir=output_dir,
                prediction_mode=prediction_mode,
            )
            glbscene.export(file_obj=glbfile)
            
            print(f"✅ Model regenerated: {glbfile}")
            
            # glb_file is a bare filename; the viewer resolves it against the
            # /<path:path> route above.
            return jsonify({
                'status': 'success',
                'glb_file': os.path.basename(glbfile),
                'glb_filename': os.path.basename(glbfile),
                'conf_thres': conf_thres
            })
            
        except Exception as e:
            print(f"❌ Error regenerating model: {e}")
            import traceback
            traceback.print_exc()
            return jsonify({'error': str(e)}), 500

    @app.route('/depth_meta', methods=['GET'])
    def depth_meta():
        """Report (frames, height, width) of the cached depth maps.

        Always answers 200 — zeros signal "no depth available" so the viewer
        can degrade gracefully instead of handling error statuses.
        """
        try:
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'frames': 0, 'height': 0, 'width': 0}), 200
            # Membership test instead of .get(): NpzFile lacks .get() on older
            # numpy versions where it is not a Mapping. Context manager closes
            # the underlying file handle.
            with np.load(prediction_path, allow_pickle=True) as preds:
                depth = preds['depth'] if 'depth' in preds else None
            if depth is None:
                return jsonify({'frames': 0, 'height': 0, 'width': 0}), 200
            if depth.ndim == 2:
                # Single (H, W) map.
                frames, height, width = 1, depth.shape[0], depth.shape[1]
            elif depth.ndim == 3:
                # (frames, H, W)
                frames, height, width = depth.shape[0], depth.shape[1], depth.shape[2]
            elif depth.ndim == 4 and depth.shape[-1] == 1:
                # (frames, H, W, 1) — trailing singleton channel.
                frames, height, width = depth.shape[0], depth.shape[1], depth.shape[2]
            else:
                # Unknown shape: best-effort guess from leading/trailing axes.
                frames = int(depth.shape[0]) if depth.shape[0] > 0 else 0
                height = int(depth.shape[-2])
                width = int(depth.shape[-1])
            return jsonify({'frames': int(frames), 'height': int(height), 'width': int(width)}), 200
        except Exception as e:
            return jsonify({'frames': 0, 'height': 0, 'width': 0, 'error': str(e)}), 200

    def _colormap(depth2d: np.ndarray, cm_name: str = 'inferno') -> np.ndarray:
        """Map an (H, W) float depth array to an (H, W, 3) uint8 RGB image.

        Depths <= 0 are treated as invalid and excluded from the normalization
        range. Falls back to grayscale when matplotlib is unavailable or the
        requested colormap name does not exist.
        """
        valid = depth2d > 0
        if valid.any():
            mn = float(depth2d[valid].min())
            mx = float(depth2d[valid].max())
        else:
            mn, mx = 0.0, 1.0
        denom = max(mx - mn, 1e-6)  # guard against a constant depth map
        norm = np.clip((depth2d - mn) / denom, 0.0, 1.0)
        if mpl_cm is not None and hasattr(mpl_cm, cm_name):
            cmap = getattr(mpl_cm, cm_name)
            rgb = (cmap(norm)[..., :3] * 255).astype(np.uint8)  # drop alpha channel
        else:
            # Gray fallback
            gray = (norm * 255).astype(np.uint8)
            rgb = np.stack([gray, gray, gray], axis=-1)
        return rgb

    @app.route('/depth_png', methods=['GET'])
    def depth_png():
        """Serve one depth frame as a colormapped PNG (?frame=N&cm=name)."""
        try:
            frame = int(request.args.get('frame', 0))
            cm_name = request.args.get('cm', 'inferno')
            prediction_path = os.path.join(output_dir, "predictions.npz")
            if not os.path.exists(prediction_path):
                return jsonify({'error': 'predictions.npz not found'}), 404
            # Membership test + context manager for the same reasons as in
            # depth_meta (portable across numpy versions, no leaked handle).
            with np.load(prediction_path, allow_pickle=True) as preds:
                depth = preds['depth'] if 'depth' in preds else None
            if depth is None:
                return jsonify({'error': 'depth not found in predictions'}), 404
            # Select the requested frame, clamping the index into range.
            if depth.ndim == 2:
                depth2d = depth
            elif depth.ndim == 3:
                frame = max(0, min(depth.shape[0]-1, frame))
                depth2d = depth[frame]
            elif depth.ndim == 4 and depth.shape[-1] == 1:
                frame = max(0, min(depth.shape[0]-1, frame))
                depth2d = depth[frame, ..., 0]
            else:
                return jsonify({'error': f'unsupported depth shape {depth.shape}'}), 400

            rgb = _colormap(depth2d, cm_name)
            if Image is None:
                # Fallback when PIL is missing: encode as binary PPM (P6) and
                # ship as octet-stream — the browser won't render it, but the
                # endpoint still responds deterministically.
                buf = BytesIO()
                header = f"P6\n{rgb.shape[1]} {rgb.shape[0]}\n255\n".encode('ascii')
                buf.write(header)
                buf.write(rgb.tobytes())
                buf.seek(0)
                return send_file(buf, mimetype='application/octet-stream')

            img = Image.fromarray(rgb)
            out = BytesIO()
            img.save(out, format='PNG')
            out.seek(0)
            return send_file(out, mimetype='image/png')
        except Exception as e:
            return jsonify({'error': str(e)}), 500
    
    # Start server in a separate thread
    def run_server():
        print(f"\n🌐 Server started at http://localhost:{port}")
        print(f"📂 Serving files from: {output_dir}")
        print(f"\n✨ Opening browser... (Press Ctrl+C to stop the server)")
        app.run(host='0.0.0.0', port=port, debug=False, use_reloader=False)
    
    # Daemon thread so the process exits cleanly when the main thread stops.
    server_thread = threading.Thread(target=run_server, daemon=True)
    server_thread.start()
    
    # Open browser
    webbrowser.open(f"http://localhost:{port}/viewer.html")
    
    # Keep main thread alive until interrupted.
    try:
        server_thread.join()
    except KeyboardInterrupt:
        print("\n\n👋 Server stopped")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Direct 3D reconstruction from images")
    parser.add_argument("--input-dir", default="/data0/liqifeng/ZYC/vggt/mini_vggt/input_images",
                        help="Directory containing input images")
    parser.add_argument("--output-dir", default=None,
                        help="Output directory (default: input_dir + '_output')")
    parser.add_argument("--conf-thres", type=float, default=50.0,
                        help="Confidence threshold percentage (0-100)")
    # NOTE: --show-cam and --visualize default to True, so the bare flags were
    # previously no-ops (store_true with default=True). Paired --no-* switches
    # make the options actually controllable while keeping the old flags and
    # defaults backward-compatible.
    parser.add_argument("--show-cam", dest="show_cam", action="store_true", default=True,
                        help="Show camera positions in 3D model (default)")
    parser.add_argument("--no-show-cam", dest="show_cam", action="store_false",
                        help="Hide camera positions in 3D model")
    parser.add_argument("--prediction-mode", default="Pointmap Branch",
                        choices=["Depthmap and Camera Branch", "Pointmap Branch"],
                        help="Prediction mode to use")
    parser.add_argument("--visualize", dest="visualize", action="store_true", default=True,
                        help="Start web server to visualize the 3D model (default)")
    parser.add_argument("--no-visualize", dest="visualize", action="store_false",
                        help="Do not start the visualization web server")
    parser.add_argument("--port", type=int, default=8000,
                        help="Port for web server (default: 8000)")

    args = parser.parse_args()

    # Set default output directory if not specified; the timestamp keeps
    # repeated runs from clobbering each other.
    if args.output_dir is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        args.output_dir = f"{args.input_dir}_output_{timestamp}"

    try:
        # Run the full reconstruction pipeline and export the GLB scene.
        glbfile = reconstruct(
            args.input_dir,
            args.output_dir,
            args.conf_thres,
            args.show_cam,
            args.prediction_mode
        )

        # Create HTML viewer with default confidence threshold
        html_path = create_viewer_html(glbfile, args.output_dir, args.conf_thres)

        print(f"\n📁 Output files:")
        print(f"   - 3D Model: {glbfile}")
        print(f"   - Viewer HTML: {html_path}")
        print(f"   - Predictions: {os.path.join(args.output_dir, 'predictions.npz')}")

        # Start visualization server if requested
        if args.visualize:
            # The model is loaded for the server's use, though the current
            # /regenerate route only re-filters cached predictions.
            device = "cuda" if torch.cuda.is_available() else "cpu"
            model = load_model(device)

            start_server(
                args.output_dir,
                args.input_dir,
                args.port,
                model,
                device,
                args.show_cam,
                args.prediction_mode
            )
        else:
            print(f"\n💡 To visualize the model with adjustable confidence, run:")
            print(f"   python {__file__} --visualize --output-dir {args.output_dir}")
            print(f"\n   Or open manually: {html_path}")

    except Exception as e:
        print(f"\n❌ Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)