import torch
import sys
import os
# Updated path for the current location in third_party/vggt
# No need to append third_party since we're already inside it

# Handle both relative imports (when used as module) and absolute imports (when run directly)
try:
    from .models.vggt import VGGT
    from .utils.load_fn import load_and_preprocess_images
    from .utils.pose_enc import pose_encoding_to_extri_intri
    from .utils.geometry import unproject_depth_map_to_point_map
except ImportError:
    # Add the current directory to the path for direct execution
    current_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, current_dir)
    from models.vggt import VGGT
    from utils.load_fn import load_and_preprocess_images
    from utils.pose_enc import pose_encoding_to_extri_intri
    from utils.geometry import unproject_depth_map_to_point_map

import matplotlib.pyplot as plt
import numpy as np

class VGGTmodel:
    """Thin inference wrapper around the VGGT model.

    Typical usage:
        model = VGGTmodel()
        results = model.process_full_pipeline(["a.png", "b.png"])

    ``infer()`` must be called before any of the ``predict_*`` methods,
    since they consume the cached aggregator tokens.
    """

    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # bfloat16 on Ampere+ GPUs (compute capability >= 8), float16 otherwise.
        # Guard the capability query: torch.cuda.get_device_capability() raises
        # on CPU-only machines, so it must only run when CUDA is available.
        if self.device == "cuda" and torch.cuda.get_device_capability()[0] >= 8:
            self.dtype = torch.bfloat16
        else:
            self.dtype = torch.float16
        self.model = self._load_model()

        # Inference state populated by infer(); None until then.
        self.images = None                   # preprocessed images, (S, 3, H, W)
        self.images_batch = None             # same images with batch dim, (1, S, 3, H, W)
        self.aggregated_tokens_list = None   # aggregator token outputs
        self.ps_idx = None                   # patch-start index from the aggregator

    def _load_model(self):
        """Load VGGT from the local checkpoint and prepare it for inference.

        Returns:
            The VGGT model on ``self.device`` in eval mode.
        """
        model = VGGT()
        _URL = "/mnt/workspace/yuhao/vggt/models--facebook--VGGT-1B/snapshots/860abec7937da0a4c03c41d3c269c366e82abdf9/model.pt"
        # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
        model.load_state_dict(torch.load(_URL, map_location=self.device))
        model = model.to(self.device)
        # Inference-only wrapper: disable dropout/batchnorm training behavior.
        model.eval()
        return model

    def infer(self, image_names):
        """Load images and extract latent tokens, setting instance state.

        Args:
            image_names: list of image file paths.
        """
        # Load and preprocess images onto the target device.
        self.images = load_and_preprocess_images(image_names).to(self.device)

        # Extract latent tokens under mixed precision. torch.amp.autocast
        # replaces the deprecated torch.cuda.amp.autocast and also works
        # correctly when the device is "cpu".
        with torch.no_grad():
            with torch.amp.autocast(device_type=self.device, dtype=self.dtype):
                self.images_batch = self.images[None]  # add batch dimension and store
                self.aggregated_tokens_list, self.ps_idx = self.model.aggregator(self.images_batch)

    def _require_inference(self):
        """Raise if infer() has not populated the cached tokens yet."""
        if self.aggregated_tokens_list is None or self.images_batch is None or self.ps_idx is None:
            raise ValueError("Must call infer() first to set images and tokens")

    def predict_camera(self):
        """Predict camera parameters from the cached tokens.

        Returns:
            Tuple ``(extrinsic, intrinsic)`` decoded from the last pose
            encoding produced by the camera head.

        Raises:
            ValueError: if infer() has not been called.
        """
        self._require_inference()

        pose_enc = self.model.camera_head(self.aggregated_tokens_list)[-1]
        extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc, self.images.shape[-2:])
        return extrinsic, intrinsic

    def predict_depth(self):
        """Predict per-image depth maps and confidences.

        Returns:
            Tuple ``(depth_map, depth_conf)``.

        Raises:
            ValueError: if infer() has not been called.
        """
        self._require_inference()

        depth_map, depth_conf = self.model.depth_head(self.aggregated_tokens_list, self.images_batch, self.ps_idx)
        print(f"Depth map shape: {depth_map.shape}")  # torch.Size([1, 2, 392, 518, 1])
        return depth_map, depth_conf

    def predict_points(self):
        """Predict per-pixel 3D point maps and confidences.

        Returns:
            Tuple ``(point_map, point_conf)``.

        Raises:
            ValueError: if infer() has not been called.
        """
        self._require_inference()

        point_map, point_conf = self.model.point_head(self.aggregated_tokens_list, self.images_batch, self.ps_idx)
        return point_map, point_conf

    def predict_tracks(self, query_points=None):
        """Predict 2D tracks for the given query points.

        Args:
            query_points: (N, 2) tensor of pixel coordinates; a default
                pair of points is used when None.

        Returns:
            Tuple ``(track_list, vis_score, conf_score)``.

        Raises:
            ValueError: if infer() has not been called.
        """
        self._require_inference()

        if query_points is None:
            # Default query points
            query_points = torch.FloatTensor([[100.0, 200.0], [60.72, 259.94]]).to(self.device)

        track_list, vis_score, conf_score = self.model.track_head(
            self.aggregated_tokens_list, self.images_batch, self.ps_idx, query_points=query_points[None]
        )
        return track_list, vis_score, conf_score

    def save_depth(self, depth_map, prefix="depth_map"):
        """Save each depth map as a color-mapped JPEG named ``{prefix}_{i}.jpg``.

        Args:
            depth_map: depth tensor of shape (1, S, H, W, 1) as produced by
                predict_depth().
            prefix: filename prefix for the saved images.
        """
        # Normalize depth values to [0, 1] for color mapping.
        depth_map_np = depth_map.squeeze(0).cpu().numpy()  # drop batch dim -> (S, H, W, 1)
        d_min = depth_map_np.min()
        d_range = depth_map_np.max() - d_min
        # Guard against a constant depth map (range 0 would divide by zero).
        depth_map_norm = (depth_map_np - d_min) / (d_range if d_range > 0 else 1.0)

        # 'coolwarm' maps low (near) depths to blue and high (far) to red.
        cmap = plt.get_cmap('coolwarm')
        for i in range(depth_map_norm.shape[0]):  # one image per view
            depth_image = depth_map_norm[i, :, :, 0]  # depth values for the i-th image
            depth_colored = cmap(depth_image)[:, :, :3]  # apply colormap, drop alpha channel
            depth_colored = (depth_colored * 255).astype(np.uint8)  # to 8-bit image
            plt.imsave(f"{prefix}_{i}.jpg", depth_colored)

    def unproject_depth(self, depth_map, extrinsic, intrinsic, visualize=True):
        """Unproject depth maps to 3D points using the cameras.

        Args:
            depth_map: (1, S, H, W, 1) depth tensor.
            extrinsic: (1, S, ...) camera extrinsics.
            intrinsic: (1, S, ...) camera intrinsics.
            visualize: when True, render a scatter plot of the points.

        Returns:
            Unprojected 3D points as a numpy array.
        """
        # Unprojecting the depth branch with the cameras usually yields more
        # accurate 3D points than the point-map branch.
        point_map_by_unprojection = unproject_depth_map_to_point_map(
            depth_map.squeeze(0),
            extrinsic.squeeze(0),
            intrinsic.squeeze(0)
        )
        print(f"Unprojected points shape: {point_map_by_unprojection.shape}")

        # The helper may return either a tensor or a numpy array; normalize.
        if isinstance(point_map_by_unprojection, torch.Tensor):
            points = point_map_by_unprojection.cpu().numpy()
        else:
            points = point_map_by_unprojection

        if visualize:
            self._visualize_3d_points(points)

        return points

    def _visualize_3d_points(self, points, sample_rate=200):
        """Scatter-plot subsampled 3D points and save to '3d_points.png'.

        Args:
            points: (S, ..., 3) array of 3D points, one set per image.
            sample_rate: keep every ``sample_rate``-th point to limit plot size.
        """
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111, projection='3d')

        # NOTE(review): assumes at most two point sets (matches the two-image
        # pipeline); a third image would raise IndexError on `colors[i]`.
        colors = ['r', 'b']
        for i in range(points.shape[0]):
            pts = points[i].reshape(-1, 3)[::sample_rate]
            ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c=colors[i], s=1, alpha=0.5)

        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_title('3D Point Cloud from Depth Maps')

        plt.savefig('3d_points.png', dpi=300)
        plt.show()

    def process_full_pipeline(self, image_names):
        """Run the complete VGGT processing pipeline.

        Args:
            image_names: list of image file paths.

        Returns:
            Dict with images, camera parameters, depth/point maps and
            confidences, tracks, and unprojected 3D points.
        """
        # Run inference to populate the cached tokens.
        self.infer(image_names)

        # Predict camera parameters.
        extrinsic, intrinsic = self.predict_camera()

        # Predict depth.
        depth_map, depth_conf = self.predict_depth()

        # Predict points.
        point_map, point_conf = self.predict_points()

        # Predict tracks with the default query points.
        track_list, vis_score, conf_score = self.predict_tracks()

        # Save depth map visualization. NOTE(review): the first image *path*
        # is used as the filename prefix, producing e.g. 'a.png_0.jpg' —
        # confirm this naming is intended.
        self.save_depth(depth_map, image_names[0])

        # Unproject depth to 3D points.
        points = self.unproject_depth(depth_map, extrinsic, intrinsic)

        return {
            'images': self.images,
            'extrinsic': extrinsic,
            'intrinsic': intrinsic,
            'depth_map': depth_map,
            'depth_conf': depth_conf,
            'point_map': point_map,
            'point_conf': point_conf,
            'track_list': track_list,
            'vis_score': vis_score,
            'conf_score': conf_score,
            'points_3d': points
        }


def main(image_names=None):
    """Run the VGGT processing pipeline end to end.

    Args:
        image_names: optional list of image paths; defaults to the two
            sample arm images used by the original script.

    Returns:
        Dict of pipeline outputs (see VGGTmodel.process_full_pipeline).
    """
    # Backward-compatible generalization: callers may now supply their own
    # image paths; calling main() with no arguments behaves as before.
    if image_names is None:
        image_names = ["../../script/arm1.png", "../../script/arm2.png"]

    model = VGGTmodel()
    results = model.process_full_pipeline(image_names)
    print("Processing completed successfully!")

    return results


# Script entry point: run the full pipeline on the default sample images.
if __name__ == "__main__":
    main() 