import gradio as gr
import cv2
import numpy as np
from PIL import Image, ImageDraw
import os
import tempfile

from sam.sam_client import predict
import tempfile
import os

def sam_model_api(image, points=None, boxes=None):
    """Real SAM model API for segmentation"""
    # Save image to temp file
    with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp:
        cv2.imwrite(tmp.name, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        tmp_path = tmp.name
    
    try:
        if points:
            # Convert points format for SAM API
            point_coords = [list(p) for p in points]
            point_labels = [1] * len(points)  # All foreground points
            masks, scores, _ = predict(
                tmp_path,
                point_coords=point_coords,
                point_labels=point_labels,
                multimask_output=False
            )
            mask = masks[0].astype(np.uint8) * 255
            
        elif boxes:
            # Convert boxes format for SAM API
            box = [boxes[0][0], boxes[0][1], boxes[0][2], boxes[0][3]]
            masks, _, _ = predict(
                tmp_path,
                box=box,
                multimask_output=False
            )
            mask = masks[0].astype(np.uint8) * 255
        else:
            mask = np.zeros(image.shape[:2], dtype=np.uint8)
            
        return mask
        
    finally:
        # Clean up temp file
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)

def inpainting_model_api(original_img, mask, new_clothing_img):
    """Improved inpainting model API for virtual try-on"""
    # Resize clothing image to match mask size
    h, w = mask.shape[:2]
    new_clothing_img = cv2.resize(new_clothing_img, (w, h))
    
    # Convert mask to 3 channels for color image
    mask_3ch = cv2.merge([mask, mask, mask])
    
    # Normalize mask to 0-1 range
    mask_float = mask_3ch.astype(float)/255.0
    
    # Apply Gaussian blur to mask edges for smoother blending
    mask_blur = cv2.GaussianBlur(mask_float, (21, 21), 0)
    
    # Blend images using the mask
    result = original_img * (1 - mask_blur) + new_clothing_img * mask_blur
    result = result.astype(np.uint8)
    
    # Apply detail enhancement to make clothing look more natural
    result = cv2.detailEnhance(result, sigma_s=5, sigma_r=0.1)
    
    return [result]  # Return single result instead of multiple

def enhancement_model_api(image, method="detail", sigma_s=10, sigma_r=0.15, contrast=1.2):
    """Advanced image enhancement API"""
    if method == "detail":
        enhanced = cv2.detailEnhance(image, sigma_s=sigma_s, sigma_r=sigma_r)
    elif method == "color":
        lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        l, a, b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        cl = clahe.apply(l)
        enhanced = cv2.merge((cl, a, b))
        enhanced = cv2.cvtColor(enhanced, cv2.COLOR_LAB2RGB)
    elif method == "contrast":
        enhanced = cv2.convertScaleAbs(image, alpha=contrast, beta=0)
    else:
        enhanced = image.copy()
    
    return enhanced

def create_inpainting_ui():
    """Create inpainting UI for virtual try-on"""
    with gr.Blocks() as ui:
        with gr.Row():
            with gr.Column():
                original_image = gr.Image(label="Original Image", interactive=False, width="100%", height="auto")
                segmented_image = gr.Image(label="Segmented Area", interactive=False, width="100%", height="auto")
                clothing_image = gr.Image(label="Clothing Image", type="numpy", width="100%", height="auto")
            with gr.Column():
                inpainted_image = gr.Image(label="Inpainted Result", interactive=False, width="100%", height="auto")
        
        with gr.Row():
            prev_btn = gr.Button("Previous")
            inpaint_btn = gr.Button("Inpaint", variant="primary")
            next_btn = gr.Button("Next", variant="primary")
        
        # Inpaint handler
        def inpaint_handler(original_img, mask_img, clothing_img):
            if None in [original_img, mask_img, clothing_img]:
                return None
            return inpainting_model_api(original_img, mask_img[:,:,0], clothing_img)[0]
        
        # Bind event handlers
        inpaint_btn.click(
            inpaint_handler,
            [original_image, segmented_image, clothing_image],
            inpainted_image
        )
    
    return ui

def create_enhancement_ui():
    """Create enhancement UI with options"""
    with gr.Blocks() as ui:
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", interactive=False, width="100%", height="auto")
                with gr.Accordion("Enhancement Settings", open=False):
                    method = gr.Radio(
                        ["detail", "color", "contrast"], 
                        label="Enhancement Method",
                        value="detail"
                    )
                    sigma_s = gr.Slider(1, 20, value=10, label="Detail Strength")
                    sigma_r = gr.Slider(0.01, 0.5, value=0.15, step=0.01, label="Detail Smoothness")
                    contrast = gr.Slider(0.5, 2.0, value=1.2, step=0.1, label="Contrast")
            with gr.Column():
                enhanced_image = gr.Image(label="Enhanced Image", interactive=False, width="100%", height="auto")
        
        with gr.Row():
            prev_btn = gr.Button("Previous")
            enhance_btn = gr.Button("Enhance", variant="primary")
            finish_btn = gr.Button("Finish", variant="primary")
        
        # Enhance handler
        def enhance_handler(image, method, sigma_s, sigma_r, contrast):
            if image is None:
                return None
            return enhancement_model_api(image, method, sigma_s, sigma_r, contrast)

        # Bind event handlers
        enhance_btn.click(
            enhance_handler,
            [input_image, method, sigma_s, sigma_r, contrast],
            enhanced_image
        )

    return ui

def create_segmentation_ui():
    """Create segmentation UI with point/box selection"""
    with gr.Blocks() as ui:
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(
                    label="Original Image", 
                    type="numpy",
                    interactive=True,
                    height="66vh",
                    elem_classes=["scrollable-image"]
                )
                with gr.Row():
                    # zoom_in = gr.Button("🔍", min_width=30)
                    # zoom_out = gr.Button("🔎", min_width=30) 
                    # reset_view = gr.Button("🔄", min_width=30)
                    add_point = gr.Button("📍", min_width=30)
                    remove_point = gr.Button("❌", min_width=30)
                    clear_points = gr.Button("🧹", min_width=30)
                    add_box = gr.Button("🟩", min_width=30)
                    remove_box = gr.Button("🟥", min_width=30)
                    clear_boxes = gr.Button("🧼", min_width=30)
            with gr.Column():
                output_mask = gr.Image(label="Segmentation Mask", interactive=False, width="100%", height="66vh")
        
        with gr.Row():
            segment_btn = gr.Button("Segment", variant="primary")
            next_btn = gr.Button("Next", variant="primary")
        
        # State variables
        points = gr.State([])
        boxes = gr.State([])
        current_image = gr.State(None)
        # zoom_level = gr.State(1.0)
        drawing_box = gr.State(False)
        box_start = gr.State(None)

        # Point handlers
        def add_point_handler(image, points_list, evt: gr.SelectData):
            points_list.append((evt.index[0], evt.index[1]))
            marked = image.copy()
            for pt in points_list:
                cv2.drawMarker(marked, pt, (0,0,255), cv2.MARKER_STAR, 20, 2)
            return marked, points_list

        def remove_point_handler(image, points_list):
            if points_list:
                points_list.pop()
                marked = image.copy()
                for pt in points_list:
                    cv2.drawMarker(marked, pt, (0,0,255), cv2.MARKER_STAR, 20, 2)
                return marked, points_list
            return image, points_list

        def clear_points_handler(image):
            return image, []

        # Box handlers
        def start_box_handler(image, evt: gr.SelectData):
            return image, True, (evt.index[0], evt.index[1])

        def update_box_handler(image, is_drawing, start, evt: gr.SelectData):
            if not is_drawing:
                return image, is_drawing, start
            temp = image.copy()
            cv2.rectangle(temp, start, (evt.index[0], evt.index[1]), (0,255,0), 2)
            return temp, is_drawing, start

        def end_box_handler(image, is_drawing, start, boxes_list, evt: gr.SelectData):
            if not is_drawing:
                return image, is_drawing, start, boxes_list
            boxes_list.append((start[0], start[1], evt.index[0], evt.index[1]))
            temp = image.copy()
            for box in boxes_list:
                x1, y1, x2, y2 = box
                cv2.rectangle(temp, (x1, y1), (x2, y2), (0,255,0), 2)
            return temp, False, None, boxes_list
        
        # Segment handler
        def segment_handler(image, points_list, boxes_list):
            if image is None:
                return None
            return sam_model_api(image, points=points_list, boxes=boxes_list)

        # # Zoom handlers with debug logging
        # def zoom_in_handler(image, zoom_lvl, current_img):
        #     print(f"\n[DEBUG] zoom_in_handler called")
        #     print(f"Input - image: {type(image)}, zoom_lvl: {zoom_lvl}, current_img: {type(current_img)}")
        #     if image is None:
        #         print("Warning: image is None")
        #         return image, zoom_lvl, current_img
        #     zoom_lvl = min(zoom_lvl * 1.2, 3.0)
        #     h, w = image.shape[:2]
        #     print(f"Original size: {w}x{h}, zoom level: {zoom_lvl}")
        #     resized = cv2.resize(image, (int(w*zoom_lvl), int(h*zoom_lvl)))
        #     print(f"Resized to: {int(w*zoom_lvl)}x{int(h*zoom_lvl)}")
        #     return resized, zoom_lvl, resized

        # def zoom_out_handler(image, zoom_lvl, current_img):
        #     print(f"\n[DEBUG] zoom_out_handler called") 
        #     print(f"Input - image: {type(image)}, zoom_lvl: {zoom_lvl}, current_img: {type(current_img)}")
        #     if image is None:
        #         print("Warning: image is None")
        #         return image, zoom_lvl, current_img
        #     zoom_lvl = max(zoom_lvl / 1.2, 0.5)
        #     h, w = image.shape[:2]
        #     print(f"Original size: {w}x{h}, zoom level: {zoom_lvl}")
        #     resized = cv2.resize(image, (int(w*zoom_lvl), int(h*zoom_lvl)))
        #     print(f"Resized to: {int(w*zoom_lvl)}x{int(h*zoom_lvl)}")
        #     return resized, zoom_lvl, resized

        # def reset_zoom_handler(image, zoom_lvl, current_img):
        #     print(f"\n[DEBUG] reset_zoom_handler called")
        #     print(f"Input - image: {type(image)}, zoom_lvl: {zoom_lvl}, current_img: {type(current_img)}")
        #     if image is None:
        #         print("Warning: image is None")
        #         return image, zoom_lvl, current_img
        #     h, w = image.shape[:2]
        #     resized = cv2.resize(image, (w, h))
        #     print(f"Reset to original size: {w}x{h}")
        #     return resized, 1.0, resized

        # # Bind zoom handlers
        # zoom_in.click(
        #     zoom_in_handler,
        #     [input_image, zoom_level, current_image],
        #     [input_image, zoom_level, current_image]
        # )
        
        # zoom_out.click(
        #     zoom_out_handler,
        #     [input_image, zoom_level, current_image],
        #     [input_image, zoom_level, current_image]
        # )
        
        # reset_view.click(
        #     reset_zoom_handler,
        #     [input_image, zoom_level, current_image],
        #     [input_image, zoom_level, current_image]
        # )

    return ui

def main():
    """Main application"""
    with gr.Blocks(title="Virtual Try-On System", css="static/styles.css") as app:
        gr.Markdown("# Virtual Try-On System")
        
        with gr.Tabs():
            with gr.TabItem("1. Segmentation"):
                seg_ui = create_segmentation_ui()
            with gr.TabItem("2. Inpainting"):
                inpaint_ui = create_inpainting_ui()
            with gr.TabItem("3. Enhancement"):
                enhance_ui = create_enhancement_ui()
        
        # Navigation handlers
        def next_to_inpainting(mask_img):
            return gr.Tabs(selected=1), mask_img
        
        def prev_to_segmentation():
            return gr.Tabs(selected=0)
        
        def next_to_enhancement():
            return gr.Tabs(selected=2)
        
        def prev_to_inpainting():
            return gr.Tabs(selected=1)
        
        # Debug function to print UI structure
        def print_ui_structure(ui, name):
            print(f"\n{name} UI Structure:")
            for i, child in enumerate(ui.children):
                print(f"  {i}: {type(child).__name__}")
                if hasattr(child, 'children'):
                    for j, subchild in enumerate(child.children):
                        print(f"    {j}: {type(subchild).__name__}")

        # Print UI structures for debugging
        print_ui_structure(seg_ui, "Segmentation")
        print_ui_structure(inpaint_ui, "Inpainting") 
        print_ui_structure(enhance_ui, "Enhancement")

        # Get button references by searching through components
        def find_button(ui):
            for child in ui.children:
                if isinstance(child, gr.Row):
                    for btn in child.children:
                        if isinstance(btn, gr.Button):
                            if btn.value == "Next":
                                return btn
                            elif btn.value == "Previous":
                                return btn
            return None

        seg_next_btn = find_button(seg_ui)
        inpaint_prev_btn = find_button(inpaint_ui)
        inpaint_next_btn = find_button(inpaint_ui)
        enhance_prev_btn = find_button(enhance_ui)

        # Verify all buttons are found
        if None in [seg_next_btn, inpaint_prev_btn, inpaint_next_btn, enhance_prev_btn]:
            print("Error: Failed to find navigation buttons")
            print("Found buttons:", {
                "seg_next": seg_next_btn,
                "inpaint_prev": inpaint_prev_btn,
                "inpaint_next": inpaint_next_btn,
                "enhance_prev": enhance_prev_btn
            })
            return

        # Bind navigation
        seg_next_btn.click(
            next_to_inpainting,
            [seg_ui.children[-3]],  # output_mask
            [app.children[1], inpaint_ui.children[0]]  # tabs and segmented_image
        )
        
        inpaint_prev_btn.click(
            prev_to_segmentation,
            None,
            [app.children[1]]
        )
        
        inpaint_next_btn.click(
            next_to_enhancement,
            None,
            [app.children[1]]
        )
        
        enhance_prev_btn.click(
            prev_to_inpainting,
            None,
            [app.children[1]]
        )
    
    app.launch()

if __name__ == "__main__":
    main()
