import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
import os
from pathlib import Path
import warnings
import json
from scalePcd import scalePcd
warnings.filterwarnings("ignore")

class VideoVisualizer:
    def __init__(self, output_path="video.mp4", fps=30, frame_size=(1920, 1080), view_resolution=(2000, 2000), 
                 output_format="img", save_frames=False):
        """
        Set up the visualizer and prepare output locations.

        Args:
            output_path (str): Where the output video / frame folder is written
            fps (int): Playback frames per second
            frame_size (tuple): Frame size as (width, height)
            view_resolution (tuple): Per-view render resolution (width, height)
            output_format (str): One of "mp4", "avi", "webm", "gif", "img"
            save_frames (bool): Also dump every frame as an individual PNG
        """
        self.output_path = Path(output_path)
        self.fps = fps
        self.frame_size = frame_size
        self.view_resolution = view_resolution  # Global parameter for view clarity
        self.output_format = output_format.lower()
        self.save_frames = save_frames
        self.frames = []       # in-memory frame buffer (used for GIF assembly)
        self.frame_count = 0

        # Point-cloud keys in unified_data that get rendered each frame.
        self.pcd_types = [
            'pointcloud',
            'segpointcloud',
            'repointcloud',
            'injpointcloud',
            'vggtpointcloud2',
            'vggtpointcloud3'
        ]

        # Normalize the output path extension to match the chosen format;
        # "img" writes a folder of PNGs, so its extension is stripped.
        suffix_by_format = {"gif": ".gif", "webm": ".webm", "avi": ".avi", "img": ""}
        self.output_path = self.output_path.with_suffix(
            suffix_by_format.get(self.output_format, ".mp4"))

        # Make sure the destination directory exists.
        self.output_path.parent.mkdir(parents=True, exist_ok=True)

        # A frames directory is needed both for explicit frame saving and
        # for the frames-only "img" format.
        needs_frames_dir = self.save_frames or self.output_format == "img"
        if needs_frames_dir:
            self.frames_dir = self.output_path.parent / f"{self.output_path.stem}_frames"
            self.frames_dir.mkdir(parents=True, exist_ok=True)

        # The cv2 writer is created lazily on the first captured frame.
        self.video_writer = None

        if needs_frames_dir:
            print(f"Frames directory: {self.frames_dir}")
        
    def _init_video_writer(self, frame_shape):
        """Create the cv2 video writer sized to *frame_shape* (h, w, ...).

        No-op for the "img" and "gif" formats, which never stream to a
        video file.  If the codec cannot be opened, the visualizer falls
        back to frames-only ("img") mode.
        """
        # Formats that never need a cv2 writer.
        if self.output_format in ("img", "gif"):
            return

        # Codec lookup; anything unrecognized is treated as mp4.
        codec_by_format = {"webm": "VP80", "avi": "XVID"}
        fourcc = cv2.VideoWriter_fourcc(*codec_by_format.get(self.output_format, "mp4v"))

        # cv2 expects (width, height); frame_shape is (height, width, ...).
        height, width = frame_shape[0], frame_shape[1]
        self.video_writer = cv2.VideoWriter(str(self.output_path), fourcc, self.fps, (width, height))

        if not self.video_writer.isOpened():
            print(f"Warning: Could not initialize video writer for {self.output_format} format")
            print("Falling back to frames-only mode")
            self.output_format = "img"
            self.video_writer = None

    def _create_depth_visualization(self, depth):
        """
        Render a depth map as a JET-colormapped uint8 image.

        Args:
            depth (np.ndarray): Depth array of shape (H, W)

        Returns:
            np.ndarray: 3-channel color-mapped depth image of the same H, W
        """
        # Min-max normalize into [0, 255]; the epsilon guards against a
        # perfectly flat depth map dividing by zero.
        lo, hi = depth.min(), depth.max()
        scaled = (depth - lo) / (hi - lo + 1e-8)
        scaled_u8 = (scaled * 255).astype(np.uint8)

        # Map scalar depth to color via OpenCV's JET colormap.
        return cv2.applyColorMap(scaled_u8, cv2.COLORMAP_JET)
        
    def _create_pointcloud_visualization(self, pcd, img_size=None, num_views=4):
        """
        Create multiple 3D point cloud visualizations from different angles.

        Args:
            pcd (np.ndarray): Point cloud array of shape (N, 6) where columns are [x, y, z, r, g, b];
                arrays with fewer than 6 columns are rendered without color
            img_size (tuple): Size of each output image (uses view_resolution if None)
            num_views (int): Number of different viewing angles (default 4, at most 8 —
                the size of the predefined angle table)

        Returns:
            list: List of rendered point cloud images (H x W x 3 uint8 RGB arrays)
                from different angles, or a list of None when no valid points exist
        """
        if img_size is None:
            img_size = self.view_resolution
            
        # Extract coordinates and colors
        if pcd.shape[1] >= 6:
            xyz = pcd[:, :3]
            rgb = pcd[:, 3:6]
            
            # Use scalePcd for unified color scaling
            rgb = scalePcd(rgb, tag="videovisualizer_pointcloud")
            
            # For visualization, normalize to 0-1 range
            rgb = rgb / 255.0
            # Ensure RGB values are in valid range [0, 1]
            rgb = np.clip(rgb, 0, 1)
        else:
            xyz = pcd[:, :3]
            rgb = None
            
        # Filter out zero points (invalid points): a point survives if ANY of
        # its coordinates is non-zero, so all-zero padding rows are dropped.
        valid_mask = np.any(xyz != 0, axis=1)
        xyz = xyz[valid_mask]
        if rgb is not None:
            rgb = rgb[valid_mask]
            
        if len(xyz) == 0:
            # Return None placeholders if no valid points; callers skip None views.
            # return [self._create_placeholder_image(img_size, f"No Point Cloud View {i+1}") for i in range(num_views)]
            return [None for _ in range(num_views)]
        # Define viewing angles for different views
        view_angles = [
            (30, 45),    # elevation, azimuth
            (30, 135),
            (30, 225),
            (30, 315),
            (60, 45),    # higher elevation views
            (60, 135),
            (60, 225),
            (60, 315)
        ]
        
        # Limit to requested number of views
        view_angles = view_angles[:num_views]
        
        rendered_views = []
        
        for i, (elev, azim) in enumerate(view_angles):
            # figsize (inches) * dpi = img_size pixels for the rendered canvas
            fig = plt.figure(figsize=(img_size[0]/100, img_size[1]/100), dpi=100)
            ax = fig.add_subplot(111, projection='3d')
            
            if rgb is not None:
                ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], c=rgb, s=10, alpha=0.6)
            else:
                ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], s=10, alpha=0.6)
                
            ax.set_xlabel('X')
            ax.set_ylabel('Y')
            ax.set_zlabel('Z')
            ax.set_title(f'Point Cloud View {i+1}')
            
            # Set viewing angle
            ax.view_init(elev=elev, azim=azim)
            
            # Set equal aspect ratio: cube the axis limits around the cloud's
            # midpoint so the geometry is not distorted by the projection.
            max_range = np.array([xyz[:, 0].max()-xyz[:, 0].min(),
                                 xyz[:, 1].max()-xyz[:, 1].min(),
                                 xyz[:, 2].max()-xyz[:, 2].min()]).max() / 2.0
            mid_x = (xyz[:, 0].max()+xyz[:, 0].min()) * 0.5
            mid_y = (xyz[:, 1].max()+xyz[:, 1].min()) * 0.5
            mid_z = (xyz[:, 2].max()+xyz[:, 2].min()) * 0.5
            ax.set_xlim(mid_x - max_range, mid_x + max_range)
            ax.set_ylim(mid_y - max_range, mid_y + max_range)
            ax.set_zlim(mid_z - max_range, mid_z + max_range)
            
            # Convert the rendered figure to an RGB ndarray
            canvas = FigureCanvasAgg(fig)
            canvas.draw()
            
            # Use the newer buffer_rgba() method instead of deprecated tostring_rgb();
            # the nested fallbacks keep older Matplotlib versions working.
            try:
                # Try the newer method first
                buf = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
                buf = buf.reshape(canvas.get_width_height()[::-1] + (4,))
                # Convert RGBA to RGB by dropping alpha channel
                buf = buf[:, :, :3]
            except AttributeError:
                # Fallback to older method if available
                try:
                    buf = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
                    buf = buf.reshape(canvas.get_width_height()[::-1] + (3,))
                except AttributeError:
                    # Last resort - use tostring_argb and convert
                    buf = np.frombuffer(canvas.tostring_argb(), dtype=np.uint8)
                    buf = buf.reshape(canvas.get_width_height()[::-1] + (4,))
                    # Convert ARGB to RGB
                    buf = buf[:, :, 1:4]  # Skip alpha channel, take RGB
            
            rendered_views.append(buf)
            plt.close(fig)  # release the figure to avoid leaking memory per view
            
        return rendered_views

    def _calculate_layout_dimensions(self, num_views):
        """
        Compute the tiling geometry for a given number of views.

        The grid is fixed at 3 columns; the row count grows as needed.
        (The original comment said "3 rows", but the code has always fixed
        the COLUMN count at 3.)

        Args:
            num_views (int): Number of views to display

        Returns:
            tuple: (rows, cols, layout_type, total_width, total_height)
        """
        if num_views == 0:
            return (1, 1, "placeholder", self.view_resolution[0], self.view_resolution[1])

        # Fixed 3-column grid; ceil-divide to get the number of rows.
        cols = 3
        rows = max(1, int(np.ceil(num_views / cols)))

        return (
            rows,
            cols,
            f"grid_{rows}x{cols}",
            self.view_resolution[0] * cols,
            self.view_resolution[1] * rows,
        )

    def _create_combined_layout(self, available_views, view_labels):
        """
        Tile the available views into a single combined frame.

        The layout always comes from ``_calculate_layout_dimensions``, which
        only ever returns "placeholder" or "grid_RxC" layout types — so only
        those two cases are handled here.  (The previous "single",
        "horizontal" and "three_horizontal" branches could never be reached
        and have been removed.)

        Args:
            available_views (list): List of (label, image) tuples
            view_labels (list): List of view labels (kept for interface
                compatibility; labels are read from *available_views*)

        Returns:
            np.ndarray: Combined frame image
        """
        num_views = len(available_views)
        rows, cols, _layout_type, total_width, total_height = self._calculate_layout_dimensions(num_views)

        if num_views == 0:
            return self._create_placeholder_image((total_width, total_height), "No Data Available")

        # Stitch the views row by row; pad the last row with placeholders.
        grid_rows = []
        view_idx = 0
        for _row in range(rows):
            row_images = []
            for _col in range(cols):
                if view_idx < num_views:
                    row_images.append(available_views[view_idx][1])
                    view_idx += 1
                else:
                    # Fill empty grid slots with a placeholder tile.
                    row_images.append(self._create_placeholder_image(self.view_resolution, "Empty"))
            grid_rows.append(np.hstack(row_images))
        combined_frame = np.vstack(grid_rows) if len(grid_rows) > 1 else grid_rows[0]

        # Overlay each view's label near its tile's bottom-left corner.
        for idx in range(num_views):
            row, col = divmod(idx, cols)
            label_x = col * self.view_resolution[0] + 10
            label_y = (row + 1) * self.view_resolution[1] - 20
            combined_frame = self._add_text_overlay(
                combined_frame,
                available_views[idx][0],
                (label_x, label_y)
            )

        return combined_frame

    def _create_gaussian_visualization(self, gs_data, img_size=None):
        """
        Create a visualization of gaussian splatting data with alpha blending.

        Args:
            gs_data (dict): Gaussian splatting data; must contain 'positions',
                'colors' and 'opacity' arrays, optionally 'valid_points_count'
                (int or ndarray) giving how many leading rows are valid
            img_size (tuple): Size of the output image (uses view_resolution if None)

        Returns:
            np.ndarray: Rendered gaussian visualization as an H x W x 3 uint8 RGB array
        """
        if img_size is None:
            img_size = self.view_resolution
            
        # Use higher DPI for better quality on large renders
        dpi = 150 if max(img_size) > 800 else 100
        fig = plt.figure(figsize=(img_size[0]/dpi, img_size[1]/dpi), dpi=dpi)
        ax = fig.add_subplot(111, projection='3d')
        
        positions = gs_data['positions']
        colors = gs_data['colors']
        opacity = gs_data['opacity']
        # Default: every row is valid
        valid_count = gs_data.get('valid_points_count', len(positions))
        
        # Use only valid points (valid_count may arrive as a 0-d ndarray)
        if isinstance(valid_count, np.ndarray):
            valid_count = int(valid_count)
        valid_positions = positions[:valid_count]
        valid_colors = colors[:valid_count]
        valid_opacity = opacity[:valid_count]
        
        if len(valid_positions) > 0:
            # Use scalePcd for unified color scaling
            valid_colors = scalePcd(valid_colors, tag="videovisualizer_gaussian")
            
            # For visualization, normalize to 0-1 range
            valid_colors = valid_colors / 255.0
            
            # Use opacity for alpha blending - confirmed alpha blending usage
            alpha_values = valid_opacity.flatten()
            alpha_values = np.clip(alpha_values, 0, 1)
            
            # Create scatter plot with alpha blending
            scatter = ax.scatter(valid_positions[:, 0], 
                               valid_positions[:, 1], 
                               valid_positions[:, 2],
                               c=valid_colors, 
                               s=2,
                               alpha=alpha_values)  # Using opacity values for alpha blending
                               
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.set_title(f'Gaussian Splatting ({valid_count} points)')
        
        # Set equal aspect ratio: cube the axis limits around the midpoint
        # so the splat geometry is not distorted by the projection.
        if len(valid_positions) > 0:
            max_range = np.array([valid_positions[:, 0].max()-valid_positions[:, 0].min(),
                                 valid_positions[:, 1].max()-valid_positions[:, 1].min(),
                                 valid_positions[:, 2].max()-valid_positions[:, 2].min()]).max() / 2.0
            mid_x = (valid_positions[:, 0].max()+valid_positions[:, 0].min()) * 0.5
            mid_y = (valid_positions[:, 1].max()+valid_positions[:, 1].min()) * 0.5
            mid_z = (valid_positions[:, 2].max()+valid_positions[:, 2].min()) * 0.5
            ax.set_xlim(mid_x - max_range, mid_x + max_range)
            ax.set_ylim(mid_y - max_range, mid_y + max_range)
            ax.set_zlim(mid_z - max_range, mid_z + max_range)
        
        # Convert the rendered figure to an RGB ndarray
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        
        # Use the newer buffer_rgba() method instead of deprecated tostring_rgb();
        # the nested fallbacks keep older Matplotlib versions working.
        try:
            # Try the newer method first
            buf = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
            buf = buf.reshape(canvas.get_width_height()[::-1] + (4,))
            # Convert RGBA to RGB by dropping alpha channel
            buf = buf[:, :, :3]
        except AttributeError:
            # Fallback to older method if available
            try:
                buf = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
                buf = buf.reshape(canvas.get_width_height()[::-1] + (3,))
            except AttributeError:
                # Last resort - use tostring_argb and convert
                buf = np.frombuffer(canvas.tostring_argb(), dtype=np.uint8)
                buf = buf.reshape(canvas.get_width_height()[::-1] + (4,))
                # Convert ARGB to RGB
                buf = buf[:, :, 1:4]  # Skip alpha channel, take RGB
        
        plt.close(fig)  # release the figure to avoid leaking memory per call
        return buf
        
    def _create_placeholder_image(self, img_size=None, text="No Data"):
        """Return a dark-gray (W, H) image with *text* centered on it."""
        if img_size is None:
            img_size = self.view_resolution

        # Dark gray background (value 50 on all channels).
        canvas = np.full((img_size[1], img_size[0], 3), 50, dtype=np.uint8)

        # Font metrics scale with the requested resolution.
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = max(1.0, img_size[0] / 400.0)
        thickness = max(2, int(img_size[0] / 200.0))
        color = (150, 150, 150)  # light gray text

        # Center the text on the canvas.
        (text_w, text_h), _baseline = cv2.getTextSize(text, font, font_scale, thickness)
        origin = ((img_size[0] - text_w) // 2, (img_size[1] + text_h) // 2)

        cv2.putText(canvas, text, origin, font, font_scale, color, thickness)
        return canvas

    def _resize_image(self, img, target_size=None):
        """
        Resize *img* to *target_size* (defaults to ``self.view_resolution``).

        Args:
            img (np.ndarray): Image to resize; must not be None
            target_size (tuple): Target (width, height)

        Raises:
            ValueError: If *img* is None.  An explicit ``raise`` replaces the
                previous ``assert``, which would be stripped under ``python -O``.
        """
        if target_size is None:
            target_size = self.view_resolution
        if img is None:
            raise ValueError("Image is None")
        return cv2.resize(img, target_size, interpolation=cv2.INTER_LINEAR)
        
    def _add_text_overlay(self, img, text, position=(10, 30)):
        """Draw *text* at *position* over a black backing box; returns a new image."""
        # Work on a copy so read-only input arrays are never mutated.
        out = img.copy()

        font = cv2.FONT_HERSHEY_SIMPLEX
        # Scale font size and stroke width with the configured view resolution.
        font_scale = max(0.7, self.view_resolution[0] / 600.0)
        thickness = max(2, int(self.view_resolution[0] / 400.0))

        # Black backing rectangle keeps the white text readable on any content.
        (text_w, text_h), baseline = cv2.getTextSize(text, font, font_scale, thickness)
        x, y = position
        cv2.rectangle(out, (x - 5, y - text_h - 5),
                      (x + text_w + 5, y + baseline + 5), (0, 0, 0), -1)

        cv2.putText(out, text, position, font, font_scale, (255, 255, 255), thickness)
        return out

    def _print_ascii_layout(self, num_views, view_labels):
        """Print a small ASCII diagram of the tiled frame layout."""
        if num_views == 0:
            print("📺 Layout: [No views available]")
            return

        rows, cols, layout_type, total_width, total_height = self._calculate_layout_dimensions(num_views)

        print(f"📺 Layout: {layout_type} ({total_width}x{total_height})")
        print(f"📐 Frame dimensions: {self.view_resolution[0]}x{self.view_resolution[1]} each")

        # Build one "[name    ]" cell per grid slot; blank cells mark unused slots.
        ascii_layout = []
        slot = 0
        for _ in range(rows):
            cells = []
            for _ in range(cols):
                if slot < num_views:
                    name = view_labels[slot]
                    if len(name) > 8:
                        # Abbreviate long labels so cells stay 8 chars wide.
                        name = name[:5] + "..."
                    cells.append(f"[{name:8}]")
                    slot += 1
                else:
                    cells.append("[        ]")
            ascii_layout.append(" ".join(cells))

        # The box width follows the first row (all rows are equally wide).
        content_width = len(ascii_layout[0]) if ascii_layout else 20

        print("    ┌" + "─" * (content_width + 2) + "┐")
        for line in ascii_layout:
            print(f"    │ {line} │")
        print("    └" + "─" * (content_width + 2) + "┘")

    def preprocess_views(self, unified_data):
        """
        Build the dict of rendered, resized views for one frame of unified data.

        Args:
            unified_data (dict): Unified data dictionary containing all sensor data

        Returns:
            dict: Mapping of descriptive view names to resized images
        """
        views = {}
        cameras = unified_data.get('cameras', {})

        # Prefer midBack_camera for RGB / depth / segmentation; otherwise
        # fall back to the observer RGB plus any camera that provides depth.
        rgb_img = None
        depth_map = None
        seg_img = None
        if 'midBack_camera' in cameras:
            midback = cameras['midBack_camera']
            rgb_img = midback.get('rgb')
            depth_map = midback.get('depth')
            seg_img = midback.get('segmentation')
        else:
            rgb_img = unified_data.get('rgb')
            for cam_data in cameras.values():
                if cam_data.get('depth') is not None:
                    depth_map = cam_data['depth']
                    break

        if rgb_img is not None:
            views['RGB'] = self._resize_image(rgb_img, self.view_resolution)

        if depth_map is not None:
            depth_vis = self._create_depth_visualization(depth_map)
            views['Depth'] = self._resize_image(depth_vis, self.view_resolution)

        # Segmentation shares the RGB resize path (same spatial shape).
        if seg_img is not None:
            views['Segmentation'] = self._resize_image(seg_img, self.view_resolution)

        # Render three viewing angles for every configured point-cloud type.
        for pcd_type in self.pcd_types:
            pcd_data = unified_data.get(pcd_type)
            if pcd_data is None:
                continue
            rendered = self._create_pointcloud_visualization(pcd_data, self.view_resolution, num_views=3)
            for i, view_img in enumerate(rendered):
                if view_img is not None:
                    views[f"{pcd_type.upper()}_View_{i+1}"] = self._resize_image(view_img, self.view_resolution)

        # Gaussian splatting view, when present.
        gs = unified_data.get('gaussian')
        if gs is not None:
            gs_vis = self._create_gaussian_visualization(gs, self.view_resolution)
            if gs_vis is not None:
                views['Gaussian_Splatting'] = self._resize_image(gs_vis, self.view_resolution)

        return views

    def capture(self, unified_data, timestep=None, episode_idx=None):
        """
        Render and record one frame from *unified_data*.

        Args:
            unified_data (dict): Unified data dictionary containing all sensor data
            timestep (int, optional): Current timestep; falls back to
                ``unified_data['timestep']`` and then 0
            episode_idx (int, optional): Episode index; falls back to
                ``unified_data['episode_idx']``

        Raises:
            Exception: Re-raises any error hit while building the frame,
                after printing a diagnostic line.
        """
        try:
            # Resolve timestep / episode from the data dict when not given.
            if timestep is None:
                timestep = unified_data.get('timestep', 0)
            if episode_idx is None:
                episode_idx = unified_data.get('episode_idx')

            # Render every available view and tile them into one frame.
            processed_views = self.preprocess_views(unified_data)
            available_views = [(key, view) for key, view in processed_views.items()]
            view_labels = list(processed_views.keys())
            num_views = len(available_views)
            combined_frame = self._create_combined_layout(available_views, view_labels)

            # Stamp timestep / episode / view summary onto the frame.
            text_info = f"Timestep: {timestep}"
            if episode_idx is not None:
                text_info += f" | Episode: {episode_idx}"
            text_info += f" | Views: {num_views} ({', '.join(view_labels)})"
            combined_frame = self._add_text_overlay(combined_frame, text_info, (10, 30))

            # Persist the frame as a PNG when frame output is requested.
            if self.save_frames or self.output_format == "img":
                frame_filename = self.frames_dir / f"frame_{self.frame_count:06d}.png"
                cv2.imwrite(str(frame_filename), cv2.cvtColor(combined_frame, cv2.COLOR_RGB2BGR))

            # Buffer frames in memory for GIF assembly.
            if self.output_format == "gif":
                self.frames.append(combined_frame)

            # Lazily create the video writer on the first frame.
            # BUG FIX: the guard previously checked the nonexistent format
            # "frames_only" instead of "img"; _init_video_writer's own early
            # return masked it, but the check is now correct.
            if self.video_writer is None and self.output_format not in ("gif", "img"):
                self._init_video_writer(combined_frame.shape)

            # Write frame to the video (if a writer is available).
            if self.video_writer is not None:
                # OpenCV expects BGR channel ordering.
                combined_frame_bgr = cv2.cvtColor(combined_frame, cv2.COLOR_RGB2BGR)
                self.video_writer.write(combined_frame_bgr)

            self.frame_count += 1

        except Exception as e:
            print(f"    Error capturing frame {timestep}: {e}")
            raise

    def _create_gif(self):
        """Create GIF from collected frames."""
        try:
            from PIL import Image
            
            # Convert frames to PIL Images
            pil_frames = []
            for i, frame in enumerate(self.frames):
                print(f"    Converting frame {i} to PIL Image gif",end="\r")
                pil_frame = Image.fromarray(frame)
                pil_frames.append(pil_frame)
            
            # Calculate duration per frame in milliseconds
            duration = int(1000 / self.fps)
            
            # Save as GIF
            if pil_frames:
                pil_frames[0].save(
                    str(self.output_path),
                    save_all=True,
                    append_images=pil_frames[1:],
                    duration=duration,
                    loop=0,
                    optimize=True
                )
                print(f"GIF created successfully:{self.output_path}")
            else:
                print("No frames to create GIF")
                
        except ImportError:
            print("PIL/Pillow not available. Cannot create GIF. Installing with: pip install Pillow")
        except Exception as e:
            print(f"Error creating GIF: {e}")
            
    def finalize_video(self):
        """Flush all output for the chosen format (video file, GIF, or frame folder)."""
        if self.output_format == "gif":
            self._create_gif()
        elif self.output_format == "img":
            print(f"Total frames: {self.frame_count}")
            self._create_frame_viewer()
        elif self.video_writer is not None:
            # Streaming video formats: close the writer to finish the file.
            self.video_writer.release()
            print(f"Video finalized and saved to: {os.getcwd()}/{self.output_path}")
        else:
            print("No frames were captured, video not created.")

        # "img" already reports its frames directory during capture.
        if self.save_frames and self.output_format != "img":
            print(f"Individual frames also saved to: {self.frames_dir}")
            
    def _create_frame_viewer(self):
        """Create a simple HTML viewer for frame sequences.

        Writes ``viewer.html`` into the frames directory.  The page shows one
        frame at a time with prev/next buttons, play/pause, an FPS slider and
        arrow-key / spacebar keyboard controls.  Literal JS/CSS braces in the
        template are doubled because the page is built with an f-string.
        """
        html_path = self.frames_dir / "viewer.html"
        
        # Get list of frame files (sorted so playback follows capture order)
        frame_files = sorted([f for f in self.frames_dir.glob("frame_*.png")])
        print(f"    Found {len(frame_files)} frames in {self.frames_dir}")
        
        # Generate the list of frame file names (relative names; the viewer
        # sits in the same directory as the frames)
        frame_file_names = [f.name for f in frame_files]
        
        html_content = f"""
<!DOCTYPE html>
<html>
<head>
    <title>Frame Viewer</title>
    <style>
        body {{ font-family: Arial, sans-serif; text-align: center; margin: 20px; }}
        #frameContainer {{ margin: 20px auto; }}
        #frameImage {{ max-width: 100%; height: auto; border: 1px solid #ccc; }}
        .controls {{ margin: 20px; }}
        button {{ margin: 5px; padding: 10px 20px; font-size: 16px; }}
        #frameInfo {{ margin: 10px; font-size: 18px; }}
        #speedControl {{ margin: 10px; }}
    </style>
</head>
<body>
    <h1>Frame Sequence Viewer</h1>
    <div id="frameInfo">Frame: <span id="currentFrame">1</span> / <span id="totalFrames">{len(frame_files)}</span></div>
    
    <div id="frameContainer">
        <img id="frameImage" src="{frame_file_names[0] if frame_file_names else ''}" alt="Frame">
    </div>
    
    <div class="controls">
        <button onclick="previousFrame()">Previous</button>
        <button onclick="togglePlay()">Play/Pause</button>
        <button onclick="nextFrame()">Next</button>
    </div>
    
    <div id="speedControl">
        Speed: <input type="range" id="speedSlider" min="1" max="30" value="10" onchange="updateSpeed()"> 
        <span id="speedValue">10</span> FPS
    </div>
    
    <script>
        let currentFrameIndex = 0;
        let totalFrames = {len(frame_files)};
        let isPlaying = false;
        let playInterval;
        let fps = 10;
        
        const frameFiles = {json.dumps(frame_file_names)};
        
        function updateFrame() {{
            if (currentFrameIndex >= 0 && currentFrameIndex < totalFrames) {{
                document.getElementById('frameImage').src = frameFiles[currentFrameIndex];
                document.getElementById('currentFrame').textContent = currentFrameIndex + 1;
            }}
        }}
        
        function nextFrame() {{
            currentFrameIndex = (currentFrameIndex + 1) % totalFrames;
            updateFrame();
        }}
        
        function previousFrame() {{
            currentFrameIndex = (currentFrameIndex - 1 + totalFrames) % totalFrames;
            updateFrame();
        }}
        
        function togglePlay() {{
            if (isPlaying) {{
                clearInterval(playInterval);
                isPlaying = false;
            }} else {{
                playInterval = setInterval(nextFrame, 1000 / fps);
                isPlaying = true;
            }}
        }}
        
        function updateSpeed() {{
            fps = document.getElementById('speedSlider').value;
            document.getElementById('speedValue').textContent = fps;
            if (isPlaying) {{
                clearInterval(playInterval);
                playInterval = setInterval(nextFrame, 1000 / fps);
            }}
        }}
        
        // Keyboard controls
        document.addEventListener('keydown', function(event) {{
            switch(event.key) {{
                case 'ArrowLeft':
                    previousFrame();
                    break;
                case 'ArrowRight':
                    nextFrame();
                    break;
                case ' ':
                    event.preventDefault();
                    togglePlay();
                    break;
            }}
        }});
    </script>
</body>
</html>
        """
        
        with open(html_path, 'w') as f:
            f.write(html_content)
            
        print(f"HTML frame viewer created: {html_path}")
        print("Open this file in a web browser to view the frame sequence")
            
    def __del__(self):
        """Best-effort release of the video writer at garbage collection."""
        # getattr guard: __init__ may have failed before video_writer existed.
        writer = getattr(self, 'video_writer', None)
        if writer is not None:
            writer.release()


def test_visualizer():
    """
    Smoke-test the visualizer across output formats.

    Exercises three capture paths over the "mp4", "gif" and "img" formats:
    the full unified-data path (RGB, depth, segmentation, point cloud,
    gaussian), the midback-camera path, and a partial-data path with the
    gaussian and segmentation entries removed.
    """
    # Create dummy data in the expected unified format
    dummy_unified_data = {
        'rgb': np.random.randint(0, 255, (84, 84, 3), dtype=np.uint8),
        'depth': np.random.rand(84, 84),
        'segmentation': np.random.randint(0, 10, (84, 84), dtype=np.uint8),  # Added segmentation
        'pointcloud': np.random.rand(100, 6) * 2 - 1,  # Random point cloud
        'gaussian': {
            'positions': np.random.rand(50, 3) * 2 - 1,
            'colors': np.random.rand(50, 3),
            'opacity': np.random.rand(50, 1),
            'rotation': np.random.rand(50, 4),
            'scaling': np.random.rand(50, 3),
            'valid_points_count': 50
        }
    }
    
    # Create dummy midback camera data
    dummy_midback_data = {
        'midBack_camera': {
            'rgba': np.random.randint(0, 255, (84, 84, 4), dtype=np.uint8),
            'depth': np.random.rand(84, 84) * 2.0,  # Different depth range
            'mesh_segmentation': np.random.randint(0, 15, (84, 84, 3), dtype=np.uint8)  # Colored segmentation
        }
    }
    
    # Test different formats
    formats_to_test = ["mp4", "gif", "img"]
    
    for fmt in formats_to_test:
        print(f"\nTesting {fmt} format with segmentation and midback camera...")
        visualizer = VideoVisualizer(f"test_video_with_seg.{fmt}", view_resolution=(400, 400), output_format=fmt)
        
        for i in range(5):  # Fewer frames for testing
            # Test with all data types including segmentation
            visualizer.capture(dummy_unified_data, i)
            
            # Test with midback camera data.
            # BUGFIX: dummy_midback_data was built but never passed, so the
            # midback-camera code path was never actually exercised. Pass it
            # via camera_data as described in the integration notes below.
            # NOTE(review): assumes capture() accepts a camera_data keyword
            # argument — confirm against the capture() signature.
            if i % 2 == 0:
                visualizer.capture(dummy_unified_data, i + 100, camera_data=dummy_midback_data)
            
            # Test with some missing data types. A shallow copy suffices
            # because only top-level keys are replaced.
            if i % 3 == 0:
                partial_data = dummy_unified_data.copy()
                partial_data['gaussian'] = None
                partial_data['segmentation'] = None
                visualizer.capture(partial_data, i + 200)
            
        visualizer.finalize_video()
    
    print("\nAll tests completed!")
    print("Key improvements:")
    print("1. Added segmentation visualization with colormap support")
    print("2. Prioritizes midback camera data when available")
    print("3. Supports both single-channel and colored segmentation")
    print("4. Point cloud now shows 4 different viewing angles by default")
    print("5. Smart layout system handles 0-7 views automatically")
    print("6. Proper stacking and centering for different view counts")
    print("7. Better labeling system with abbreviated names")
    print("\nMidback Camera Integration:")
    print("1. Automatically detects midBack_camera data in camera_data parameter")
    print("2. Prioritizes midback RGB, depth, and segmentation over unified data")
    print("3. Labels views appropriately to indicate data source")
    print("\nRecommended for VSCode viewing:")
    print("1. Use 'img' format and open the HTML viewer")
    print("2. Use 'gif' format for simple animations")
    print("3. Individual PNG frames can be viewed directly in VSCode")

if __name__ == "__main__":
    test_visualizer()