import argparse
import glob
import os
import re

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from segment_anything import (
    sam_model_registry,
    sam_hq_model_registry,
    SamPredictor
)


def initialize_sam(checkpoint_path, model_type="vit_h", device="cuda", use_sam_hq=False):
    """
    Initialize the SAM model with the specified checkpoint.

    Args:
        checkpoint_path: Path to the SAM model checkpoint
        model_type: Type of the SAM model ("vit_h", "vit_l", "vit_b")
        device: Device to run the model on ("cuda" or "cpu")
        use_sam_hq: If True, build the model from the SAM-HQ registry
            instead of the vanilla SAM registry

    Returns:
        SAM predictor object
    """
    # Select the registry once so the construction logic is written a single time.
    registry = sam_hq_model_registry if use_sam_hq else sam_model_registry
    model = registry[model_type](checkpoint=checkpoint_path).to(device)
    return SamPredictor(model)


def get_image_list(images_folder):
    """
    Collect all image files directly inside a folder, naturally sorted.

    Args:
        images_folder: Directory to scan (non-recursive).

    Returns:
        List of image paths in natural order, or None when no image is found.
    """
    image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tif', '*.tiff']
    image_list = []
    for ext in image_extensions:
        image_list.extend(glob.glob(os.path.join(images_folder, ext)))

    # Sort images naturally (1, 2, 10 instead of 1, 10, 2): split each
    # filename into digit/non-digit runs and compare digit runs as integers.
    # Plain sorted() would order lexicographically (1, 10, 2).
    def natural_key(path):
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split(r'(\d+)', os.path.basename(path))]

    image_list = sorted(image_list, key=natural_key)
    if not image_list:
        print(f"No images found in {images_folder}")
        return None

    return image_list

def get_bbox_from_mask(mask):
    """
    Extract the bounding box from a binary mask.

    Args:
        mask: Binary mask (numpy array); any value > 0 counts as foreground.

    Returns:
        Bounding box coordinates (x_min, y_min, x_max, y_max), or None when
        the mask has no foreground pixels (np.min/np.max would otherwise
        raise ValueError on the empty index arrays).
    """
    ys, xs = np.where(mask > 0)
    if ys.size == 0:
        return None

    return xs.min(), ys.min(), xs.max(), ys.max()

class Tracker:
    """
    Track a single object through an image sequence with SAM.

    Each new frame is segmented by prompting SAM with points sampled from the
    previous frame's mask contour; the very first frame (or a lost track) falls
    back to interactive click prompts.
    """

    def __init__(self, sam_model, confidence_threshold=0.8):
        # -1 means "not initialized"; track() then asks for manual input first.
        self.frame_count = -1
        self.confidence_threshold = confidence_threshold
        self.last_mask = None  # uint8 binary mask from the previous frame
        self.sam_model = sam_model
        self.device = sam_model.device

    def init(self, initial_mask):
        """Seed the tracker with an initial mask (image path or numpy array)."""
        if isinstance(initial_mask, str):
            current_mask = cv2.imread(initial_mask, cv2.IMREAD_GRAYSCALE)
            self.last_mask = (current_mask > 128).astype(np.uint8)  # Binarize mask
        else:
            self.last_mask = initial_mask.copy()
        self.frame_count = 0

    def sam_with_points(self, image: np.ndarray, input_points: np.ndarray, point_labels: np.ndarray = None):
        """
        Run SAM with point prompts.

        Args:
            image: RGB image array.
            input_points: (N, 2) array of (x, y) prompt coordinates.
            point_labels: (N,) array; 1 = foreground, 0 = background.
                Defaults to all-foreground.

        Returns:
            (mask, score): uint8 binary mask and its confidence score.
        """
        if point_labels is None:
            point_labels = np.ones(len(input_points))

        self.sam_model.set_image(image)
        masks, scores, _ = self.sam_model.predict(
            point_coords=input_points,
            point_labels=point_labels,
            multimask_output=False
        )

        # Select the mask with highest score
        best_mask_idx = np.argmax(scores)
        segmentation_mask = masks[best_mask_idx].astype(np.uint8)
        return segmentation_mask, scores[best_mask_idx]

    def sam_with_mask(self, image: np.ndarray, mask: np.ndarray):
        """
        Run SAM prompted with points sampled along the contour of `mask`.

        Returns:
            (mask, score), or (None, None) when the input mask has no contour
            (e.g. it is empty) — callers must handle the None case.
        """
        segmentation_mask = None
        score = None
        # Use the contour of the previous mask as a prompt
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        if contours:
            # Use the largest contour
            largest_contour = max(contours, key=cv2.contourArea)

            # Sample up to 20 evenly spaced points from the contour
            num_points = min(20, len(largest_contour))
            indices = np.linspace(0, len(largest_contour) - 1, num_points, dtype=int)
            point_coords = largest_contour[indices].reshape(num_points, 2)

            segmentation_mask, score = self.sam_with_points(image, point_coords)

        return segmentation_mask, score

    def manual_sam(self, image):
        """
        Collect click prompts interactively, then run SAM on them.

        Left click adds a foreground point, right click a background point;
        Backspace deletes the last point, ESC clears all, Enter submits.

        Returns:
            (mask, score), or (None, None) when no points were selected, so
            callers can always unpack two values.
        """
        input_points = []
        point_labels = []

        fig, ax = plt.subplots(figsize=(10, 10))
        ax.imshow(image)
        foreground_markers = []
        background_markers = []

        def onclick(event):
            if event.inaxes != ax:
                return

            x, y = int(event.xdata), int(event.ydata)
            if event.button == 1:  # Left click -> Foreground
                input_points.append([x, y])
                point_labels.append(1)
                marker = ax.plot(x, y, 'ro', markersize=8)[0]
                foreground_markers.append(marker)
            elif event.button == 3:  # Right click -> Background
                input_points.append([x, y])
                point_labels.append(0)
                marker = ax.plot(x, y, 'bo', markersize=8)[0]
                background_markers.append(marker)

            fig.canvas.draw()

        def onkey(event):
            if event.key == 'backspace':  # Press Backspace to remove the last point
                if input_points:
                    input_points.pop()
                    last_label = point_labels.pop()
                    markers = foreground_markers if last_label > 0 else background_markers
                    markers.pop().remove()
                    fig.canvas.draw()
            elif event.key == 'escape':  # Press ESC to clear all points
                input_points.clear()
                point_labels.clear()
                for m in foreground_markers + background_markers:
                    m.remove()
                foreground_markers.clear()
                background_markers.clear()
                fig.canvas.draw()
            elif event.key == 'enter':  # Press Enter to submit
                plt.close(fig)

        button_cid = fig.canvas.mpl_connect('button_press_event', onclick)
        key_cid = fig.canvas.mpl_connect('key_press_event', onkey)

        plt.title("Left button: Foreground | Right button: Background | Backspace: Delete | ESC: Clear | Enter: Submit")
        plt.show()
        fig.canvas.mpl_disconnect(button_cid)
        fig.canvas.mpl_disconnect(key_cid)
        if not input_points:
            print("No points selected. Skipping this frame.")
            return None, None

        # sam_with_points calls set_image itself; no extra call needed here.
        return self.sam_with_points(image, np.array(input_points), np.array(point_labels))

    def track(self, image, interactive=False):
        """
        Segment the object in `image`, using the previous mask as the prompt.

        Args:
            image: RGB frame (numpy array).
            interactive: If True, display the result and allow the user to
                re-segment manually by answering `r` at the prompt.

        Returns:
            uint8 binary mask (all zeros when segmentation failed/was skipped),
            never None.
        """
        # A lost track (last_mask is None) also falls back to manual prompts,
        # since sam_with_mask cannot run without a previous mask.
        if self.frame_count < 0 or self.last_mask is None:
            print("Tracker not initialized. Run initialization...")
            segmentation_mask, score = self.manual_sam(image)
        else:
            segmentation_mask, score = self.sam_with_mask(image, self.last_mask)

        if interactive:
            self.display(image, segmentation_mask, score)
            # Ask for confirmation or correction. The check matches the prompt
            # text: `r` triggers resegmentation (was erroneously 'n').
            while input("Is resegmentation required? (`r` to resegment, anything else to skip)").lower() == 'r':
                # manual_sam returns a (mask, score) pair — unpack it.
                segmentation_mask, score = self.manual_sam(image)
                # Display the corrected mask
                plt.figure(figsize=(10, 10))
                plt.imshow(image)
                if segmentation_mask is not None:
                    plt.imshow(segmentation_mask, alpha=0.5, cmap='viridis')
                plt.title("Corrected Mask")
                plt.show()

        if segmentation_mask is None:
            # Fall back to an empty mask so callers can always blend/save it
            # and self.last_mask stays a valid array for findContours.
            segmentation_mask = np.zeros(image.shape[:2], dtype=np.uint8)

        self.last_mask = segmentation_mask
        self.frame_count += 1

        return segmentation_mask

    @staticmethod
    def display(image, segmentation_mask, score):
        """Show the original frame, the raw mask, and the overlay side by side."""
        plt.rcdefaults()  # Reset to matplotlib's default parameters
        # Create figure and enable default interaction tools (e.g., navigation toolbar)
        fig, axs = plt.subplots(1, 3, figsize=(15, 5))

        # Set up the toolbar for default interaction
        toolbar = fig.canvas.manager.toolbar
        if toolbar is not None:
            toolbar.pan()  # Optional: activate pan mode

        axs[0].imshow(image)
        axs[0].set_title("Original Frame")

        if segmentation_mask is None:
            segmentation_mask = np.zeros(image.shape[:2], dtype=np.uint8)

        axs[1].imshow(segmentation_mask, cmap='gray')
        axs[1].set_title("Previous Mask")

        axs[2].imshow(image)
        axs[2].imshow(segmentation_mask, alpha=0.5, cmap='viridis')
        # score can be None when segmentation produced nothing; formatting
        # None with :.2f would raise, so guard it.
        title = "Predicted Mask" if score is None else f"Predicted Mask (Score: {score:.2f})"
        axs[2].set_title(title)

        plt.tight_layout()
        plt.show()

    def run(self, images_folder, output_folder, confidence_threshold=0.8, fps=30, interactive=False):
        """
        Track an object in a sequence of images using Segment Anything based on an initial segmentation mask.

        Args:
            images_folder: Path to the folder containing image sequences
            output_folder: Path to save the output segmentation results
            confidence_threshold: Threshold for accepting segmentation predictions
                (NOTE(review): currently unused by this method — kept for interface
                compatibility; verify whether it should gate predictions)
            fps: Frames per second if saving video
            interactive: If True, each frame can be reviewed/corrected manually
        """
        image_list = get_image_list(images_folder)

        if not image_list:
            print(f"No images found in {images_folder}")
            return

        # Get image dimensions from first image
        first_image = cv2.imread(image_list[0])
        height, width = first_image.shape[:2]

        # Set up video writer
        output_video_path = os.path.join(output_folder, "output_video.mp4")
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

        # Process each image
        for frame_count, image_path in enumerate(image_list):
            print(f"Processing image {frame_count+1}/{len(image_list)}: {os.path.basename(image_path)}")

            # Read the image
            frame = cv2.imread(image_path)
            if frame is None:
                print(f"Failed to read image: {image_path}")
                continue

            # SAM expects RGB; OpenCV loads BGR
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            segmentation_mask = self.track(frame_rgb, interactive=interactive)

            # Visualize the result
            colored_mask = np.zeros_like(frame)
            colored_mask[:, :, 1] = segmentation_mask * 255  # Green mask

            # Blend the mask with the original frame
            alpha = 0.5
            blended = cv2.addWeighted(frame, 1, colored_mask, alpha, 0)

            # Draw the contour
            contours, _ = cv2.findContours(segmentation_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(blended, contours, -1, (0, 255, 0), 2)

            image_name = os.path.splitext(os.path.basename(image_path))[0]
            semantic_dir = os.path.join(output_folder, "masks")
            os.makedirs(semantic_dir, exist_ok=True)
            debug_dir = os.path.join(output_folder, "debug")
            os.makedirs(debug_dir, exist_ok=True)
            # Save the segmentation mask and blended result to output folder
            mask_filename = os.path.join(semantic_dir, f"{image_name}.png")
            blend_filename = os.path.join(debug_dir, f"{image_name}.png")

            cv2.imwrite(mask_filename, segmentation_mask * 255)
            cv2.imwrite(blend_filename, blended)

            # Write frame to output video
            video_writer.write(blended)

        # Release resources
        video_writer.release()
        print(f"Video output saved to {output_video_path}")

        print(f"Processing complete. Results saved to {output_folder}")


if __name__ == "__main__":
    # Command-line entry point: parse arguments, build the SAM predictor,
    # and run the tracker over the input image folder.
    arg_parser = argparse.ArgumentParser("Object tracking segmentation", add_help=True)
    arg_parser.add_argument(
        "--input_dir", "-i", type=str, required=True, help="input directory"
    )
    arg_parser.add_argument(
        "--output_dir", "-o", type=str, default="./outputs", help="output directory"
    )
    arg_parser.add_argument(
        "--sam_version", type=str, default="vit_h", required=False, help="SAM ViT version: vit_b / vit_l / vit_h"
    )
    arg_parser.add_argument(
        "--sam_checkpoint", type=str, default="./sam_vit_h_4b8939.pth", help="path to sam checkpoint file"
    )
    arg_parser.add_argument(
        "--sam_hq_checkpoint", type=str, default="./sam_vit_h_4b8939_hq.pth", help="path to sam-hq checkpoint file"
    )
    arg_parser.add_argument(
        "--use_sam_hq", action="store_true", help="using sam-hq for prediction"
    )
    arg_parser.add_argument(
        "--device", type=str, default="cpu", help="running on which device, default=cpu"
    )
    arg_parser.add_argument(
        "--confidence_threshold", type=float, default=0.5, help="confidence threshold for mask prediction"
    )
    arg_parser.add_argument(
        "--fps", "-f", type=float, default=30, help="frame rate for output video"
    )
    arg_parser.add_argument(
        "--interactive", action="store_true", help="interactive mode for tracking"
    )
    cli_args = arg_parser.parse_args()

    # SAM-HQ and vanilla SAM use different checkpoint files.
    checkpoint = cli_args.sam_hq_checkpoint if cli_args.use_sam_hq else cli_args.sam_checkpoint

    predictor = initialize_sam(checkpoint, cli_args.sam_version, cli_args.device, cli_args.use_sam_hq)

    object_tracker = Tracker(sam_model=predictor,
                             confidence_threshold=cli_args.confidence_threshold)

    # Run tracking
    object_tracker.run(
        images_folder=cli_args.input_dir,
        output_folder=cli_args.output_dir,
        fps=cli_args.fps,
        interactive=cli_args.interactive
    )