"""
Gradio Web UI for PRISM

Gradio
1. 
2. OverLoCK + DINOv2
3. Grad-CAM
4. 
5. 
"""
import gradio as gr
import torch
from PIL import Image
import yaml
from pathlib import Path
import time
import logging
import numpy as np
from torchvision import transforms

from src.config import DEVICE, DATA_YAML, STAGE2_CONFIG
from src.inference.local_inference import LocalInference
from src.utils.gpu_memory_manager import get_memory_manager
from src.utils.task_queue import get_task_queue
from src.visualization import FeatureVisualizer, GradCAMVisualizer, StatisticsPlotter

# Root logging configuration for the whole UI process
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

# Module-level singletons, populated once by initialize_system();
# the Gradio handlers below read them directly.
inferencer = None            # two-stage LocalInference pipeline
memory_manager = None        # GPU memory manager (batch-size heuristics)
task_queue = None            # shared queue serializing concurrent requests
class_names = []             # class labels loaded from the dataset YAML
feature_visualizer = None    # intermediate feature-map renderer
gradcam_visualizer = None    # Grad-CAM / Grad-CAM++ renderer
statistics_plotter = None    # detection-statistics chart builder

# Most recent detection list, shared with the Analysis tab
latest_detections = []


def initialize_system():
    """Load models, managers, and visualizers into the module globals.

    Must be called once before the UI serves requests; the Gradio
    handlers in this module read these globals directly.
    """
    global inferencer, memory_manager, task_queue, class_names
    global feature_visualizer, gradcam_visualizer, statistics_plotter

    logger.info("=" * 80)
    logger.info("Initializing PRISM Gradio UI")
    logger.info("=" * 80)

    # GPU memory manager (detects the card and derives batch sizes)
    memory_manager = get_memory_manager()

    # Shared task queue so concurrent users are serialized
    task_queue = get_task_queue()

    # Class names come from the dataset YAML ('names' key); explicit
    # encoding avoids platform-default decoding surprises.
    with open(DATA_YAML, 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f)
        class_names = data['names']

    # Two-stage inference pipeline (EMA weights on, TTA off by default)
    logger.info("Loading inference models...")
    inferencer = LocalInference(use_ema=True, use_tta=False)

    # All visualizers share the Stage-2 refiner model
    logger.info("Initializing visualizers...")
    refiner_model = inferencer.refiner
    feature_visualizer = FeatureVisualizer(refiner_model, DEVICE)
    gradcam_visualizer = GradCAMVisualizer(refiner_model, DEVICE)
    statistics_plotter = StatisticsPlotter()

    logger.info("=" * 80)
    logger.info("System ready!")
    logger.info("=" * 80)


def get_system_info() -> str:
    """Return a Markdown summary of GPU capacity and queue status."""
    # Both globals are set together by initialize_system(); the original
    # guard checked only memory_manager, so a partially-initialized state
    # would crash on task_queue.get_queue_size() below.
    if memory_manager is None or task_queue is None:
        return "System not initialized"

    gpu_info = memory_manager.gpu_info
    params = memory_manager.optimal_params

    info = f"""
###  System Information

**GPU**: {gpu_info['name']}
**Memory**: {gpu_info['total_memory_mb']:.0f} MB ({gpu_info['total_memory_mb']/1024:.1f} GB)
**Available**: {gpu_info['free_memory_mb']:.0f} MB

**Optimal Settings**:
- Batch Size (Stage1): {params['batch_size_stage1']}
- Batch Size (Stage2): {params['batch_size_stage2']}
- Max Concurrent Users: {params['max_concurrent_users']}
- Recommendation: {params['recommendation']}

**Queue Status**: {task_queue.get_queue_size()} tasks pending
"""
    return info


# ============================================================================
# Tab 1: Detection ( - )
# ============================================================================

def predict_with_comparison(image, conf_threshold, use_ema, use_tta, show_comparison):
    """Gradio handler: queue a detection job and stream status/results.

    This function contains ``yield`` and is therefore a generator.
    Every output path must *yield* its 5-tuple — a plain
    ``return <value>`` inside a generator is discarded by Python
    (it only sets StopIteration.value), so Gradio would never
    receive the results. The original returned values on the
    result/error paths; all of those are yields now.
    """
    if image is None:
        yield None, None, None, "Please upload an image", ""
        return

    # Submit to the shared queue so concurrent users are serialized
    task_id = task_queue.submit(
        _predict_internal_with_comparison,
        image, conf_threshold, use_ema, use_tta, show_comparison
    )

    # Show the caller's queue position while they wait
    status = task_queue.get_task_status(task_id)
    position = status['position']

    if position > 1:
        queue_msg = f"⏳  {position} ..."
        yield None, None, None, queue_msg, ""
        time.sleep(0.5)

    # Block for the worker's result (5-minute cap)
    try:
        results = task_queue.wait_for_result(task_id, timeout=300)
        yield results

    except TimeoutError:
        yield None, None, None, "Request timed out after 5 minutes", ""
    except Exception as e:
        yield None, None, None, f"Detection failed: {str(e)}", ""


def _predict_internal_with_comparison(image, conf_threshold, use_ema, use_tta, show_comparison):
    """Run detection on one image; executed inside the task-queue worker.

    Returns the 5-tuple of Gradio outputs:
    (stage1 image, single result image, stage2 image, report markdown,
    stats markdown).
    """
    global latest_detections

    start_time = time.time()

    # Persist the PIL image so the file-based inference API can read it
    temp_path = Path("temp") / f"gradio_{int(time.time()*1000)}.jpg"
    # parents=True so a missing parent directory does not raise
    temp_path.parent.mkdir(parents=True, exist_ok=True)
    image.save(temp_path)

    try:
        detections = inferencer.predict_single(
            temp_path,
            conf_thresh=conf_threshold,
            save_viz=True
        )

        # Cache for the Analysis tab
        latest_detections = detections

        # Load the rendered visualization. Force-load the pixels now:
        # PIL opens files lazily and the `finally` block below unlinks
        # result_path, which would break a lazily-read image later.
        result_path = temp_path.parent / f"{temp_path.stem}_result.png"
        if result_path.exists():
            result_img = Image.open(result_path)
            result_img.load()
        else:
            result_img = image

        stage1_img = None
        stage2_img = result_img

        if show_comparison:
            # TODO: render Stage-1 proposals once the inferencer exposes them
            stage1_img = image  # placeholder until Stage-1 output is available

        # Build the Markdown report
        if len(detections) == 0:
            text_output = "**No defects detected**\n\n"
        else:
            text_output = f"## Detection Results\n\nFound **{len(detections)}** objects:\n\n"

            # Per-class counts
            class_counts = {}
            for det in detections:
                cls = det['class']
                class_counts[cls] = class_counts.get(cls, 0) + 1

            text_output += "### Class Summary\n"
            for cls, count in class_counts.items():
                text_output += f"- **{cls}**: {count}\n"

            text_output += "\n### Details\n"
            for i, det in enumerate(detections, 1):
                text_output += f"\n**Object {i}**\n"
                text_output += f"- Class: {det['class']}\n"
                text_output += f"- Confidence: {det['confidence']:.2%}\n"
                bbox = det['bbox']
                text_output += f"- BBox: [{bbox[0]:.0f}, {bbox[1]:.0f}, {bbox[2]:.0f}, {bbox[3]:.0f}]\n"

        # Runtime/statistics panel
        processing_time = time.time() - start_time
        stats_output = f"""
### ⏱ Performance

- Processing time: {processing_time:.2f} s
- Detections: {len(detections)}
- Model: {'EMA' if use_ema else 'Standard'}
- TTA: {'on' if use_tta else 'off'}
"""

        if DEVICE == 'cuda':
            free_mb, total_mb = memory_manager.check_available_memory()
            used_mb = total_mb - free_mb
            stats_output += f"\n**GPU**:\n- Used: {used_mb:.0f}/{total_mb:.0f} MB ({used_mb/total_mb*100:.1f}%)"

        return stage1_img if show_comparison else None, result_img, None if not show_comparison else stage2_img, text_output, stats_output

    finally:
        # Remove the temp input and the rendered result file
        if temp_path.exists():
            temp_path.unlink()
        result_path = temp_path.parent / f"{temp_path.stem}_result.png"
        if result_path.exists():
            result_path.unlink()


# ============================================================================
# Tab 2: Features
# ============================================================================

def visualize_features(image, feature_type, layer_name, num_channels):
    """Render intermediate feature maps for the chosen backbone and layer."""
    if image is None:
        return None, "Please upload an image"

    try:
        # Preprocess exactly as Stage-2 inference does
        preprocess = transforms.Compose([
            transforms.Resize((STAGE2_CONFIG['roi_size'], STAGE2_CONFIG['roi_size'])),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        batch = preprocess(image).unsqueeze(0).to(DEVICE)

        # Dummy ROI covering the whole image (center 0.5,0.5 / full extent)
        roi = torch.tensor([[0.5, 0.5, 1.0, 1.0]], device=DEVICE)

        # Pick the backbone-specific extractor
        channels = int(num_channels)
        if feature_type == "OverLoCK":
            maps = feature_visualizer.visualize_overlock_features(
                batch,
                roi,
                num_channels=channels
            )
        else:  # DINOv2 branch
            maps = feature_visualizer.visualize_dino_features(
                batch,
                roi,
                num_channels=channels
            )

        # Specific layer requested: validate and return it
        if layer_name != "All":
            if layer_name not in maps:
                available = ', '.join(maps.keys())
                return None, f"  '{layer_name}' \n: {available}"
            return maps[layer_name], f": {layer_name}\n: {num_channels} "

        # "All" requested: fall back to the first available layer
        if not maps:
            return None, " "
        first = next(iter(maps))
        return maps[first], f": {first}\n: {', '.join(maps.keys())}"

    except Exception as e:
        logger.error(f"Feature visualization error: {e}", exc_info=True)
        return None, f" : {str(e)}"


# ============================================================================
# Tab 3: Grad-CAM
# ============================================================================

def generate_gradcam(image, target_class, method, alpha):
    """Generate a Grad-CAM heatmap for the uploaded image.

    Args:
        image: PIL image from the Gradio component (None if not uploaded).
        target_class: class name from the dropdown, or "Auto Detect".
        method: "Grad-CAM" or "Grad-CAM++".
        alpha: heatmap overlay transparency (reported in the info panel).
    """
    if image is None:
        return None, "Please upload an image"

    try:
        # Preprocess exactly as Stage-2 inference does
        transform = transforms.Compose([
            transforms.Resize((STAGE2_CONFIG['roi_size'], STAGE2_CONFIG['roi_size'])),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        image_tensor = transform(image).unsqueeze(0).to(DEVICE)
        roi_position = torch.tensor([[0.5, 0.5, 1.0, 1.0]], device=DEVICE)

        # Resolve the target class. The dropdown's default value is
        # "Auto Detect" (see create_gradcam_tab), which — like "" —
        # means "let the model choose". The original check excluded
        # only "", so class_names.index("Auto Detect") raised
        # ValueError on every default run.
        target_class_idx = None
        if target_class not in ("", "Auto Detect"):
            # +1 offset — presumably index 0 is the background class in
            # the classifier head; TODO confirm against the refiner model.
            target_class_idx = class_names.index(target_class) + 1

        # Run Grad-CAM / Grad-CAM++
        method_name = 'gradcam++' if method == "Grad-CAM++" else 'gradcam'
        vis_img, cam = gradcam_visualizer.visualize_gradcam(
            image_tensor,
            roi_position,
            target_class=target_class_idx,
            method=method_name
        )

        # Summary panel
        info = f"""
### Grad-CAM Results

- Method: {method}
- Target Class: {target_class}
- Transparency: {alpha}
- CAM range: [{cam.min():.3f}, {cam.max():.3f}]
"""

        return vis_img, info

    except Exception as e:
        logger.error(f"Grad-CAM error: {e}", exc_info=True)
        return None, f"Grad-CAM failed: {str(e)}"


# ============================================================================
# Tab 4: Analysis
# ============================================================================

def generate_statistics(stat_type):
    """Build the requested statistics chart from the latest detections."""
    # Read-only access to the module-level cache; no `global` needed.
    detections = latest_detections

    if len(detections) == 0:
        placeholder = Image.new('RGB', (800, 600), color='white')
        return placeholder, "Please run detection in Detection tab first, then return to this tab to view statistics"

    try:
        if stat_type == "Class Distribution":
            chart = statistics_plotter.plot_class_distribution(detections, class_names)
            caption = f"Detected {len(detections)} objects"
        elif stat_type == "Confidence Distribution":
            chart = statistics_plotter.plot_confidence_distribution(detections)
            scores = [d['confidence'] for d in detections]
            caption = f"Average Confidence: {np.mean(scores):.3f}"
        elif stat_type == "ROI Size Distribution":
            chart = statistics_plotter.plot_roi_size_distribution(detections)
            caption = f"ROI count: {len(detections)}"
        elif stat_type == "Summary Statistics":
            chart = statistics_plotter.plot_summary_stats(detections, class_names)
            caption = "Complete statistics panel"
        else:
            return None, "Unknown statistics type"

        return chart, caption

    except Exception as e:
        logger.error(f"Statistics error: {e}", exc_info=True)
        return None, f" : {str(e)}"


# ============================================================================
# UI Construction
# ============================================================================

def create_detection_tab():
    """Tab 1: Detection — upload, threshold settings, and result panels.

    Returns the input image component (available for cross-tab wiring).
    """
    with gr.Tab("Detection"):
        gr.Markdown("""
        ## Defect Detection
        Upload image for intelligent defect detection with optional three-image comparison.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")
                input_image = gr.Image(type="pil", label="Upload Image")

                with gr.Accordion("Settings", open=True):
                    conf_slider = gr.Slider(0.1, 0.9, value=0.25, step=0.05, label="Confidence Threshold")
                    use_ema = gr.Checkbox(label="Use EMA Model", value=True)
                    use_tta = gr.Checkbox(label="Use TTA", value=False)
                    show_comparison = gr.Checkbox(label="Show Three-Image Comparison", value=False)

                detect_btn = gr.Button("Start Detection", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### Results")

                with gr.Tab("Single Image Result"):
                    output_single = gr.Image(type="pil", label="Detection Result")

                with gr.Tab("Three-Image Comparison"):
                    with gr.Row():
                        output_stage1 = gr.Image(type="pil", label="Stage 1 (Proposals)")
                        output_stage2 = gr.Image(type="pil", label="Stage 2 (Final)")

                output_text = gr.Markdown("Waiting for detection...")

        with gr.Row():
            stats_output = gr.Markdown("")

        # Wire the button to the prediction handler (a generator, so the
        # UI can show queue-position updates before the final result).
        detect_btn.click(
            fn=predict_with_comparison,
            inputs=[input_image, conf_slider, use_ema, use_tta, show_comparison],
            outputs=[output_stage1, output_single, output_stage2, output_text, stats_output]
        )

    return input_image


def create_features_tab():
    """Tab 2: Feature Visualization — backbone/layer selection and rendering."""
    with gr.Tab("Features"):
        gr.Markdown("""
        ## Feature Map Visualization
        View internal feature representations learned by the model.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")
                feature_input = gr.Image(type="pil", label="Upload Image")

                with gr.Accordion("Settings", open=True):
                    feature_type = gr.Radio(
                        choices=["OverLoCK", "DINOv2"],
                        value="OverLoCK",
                        label="Feature Type"
                    )
                    layer_selector = gr.Dropdown(
                        choices=["All", "stem", "stage1", "stage2", "stage3", "block_0", "block_3"],
                        value="All",
                        label="Select Layer"
                    )
                    num_channels = gr.Slider(8, 64, value=32, step=8, label="Number of Channels")

                feature_btn = gr.Button("Visualize Features", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### Feature Maps")
                feature_output = gr.Image(type="pil", label="Feature Visualization")
                feature_info = gr.Markdown("")

        # Wire the button to the feature-visualization handler
        feature_btn.click(
            fn=visualize_features,
            inputs=[feature_input, feature_type, layer_selector, num_channels],
            outputs=[feature_output, feature_info]
        )


def create_gradcam_tab():
    """Tab 3: Grad-CAM — class/method selection and heatmap display.

    NOTE: the Target Class dropdown reads the module-level `class_names`,
    so this must be built after initialize_system() has populated it.
    """
    with gr.Tab("Grad-CAM"):
        gr.Markdown("""
        ## Class Activation Mapping
        Visualize regions the model focuses on to understand decision-making process.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")
                gradcam_input = gr.Image(type="pil", label="Upload Image")

                with gr.Accordion("Settings", open=True):
                    target_class = gr.Dropdown(
                        choices=["Auto Detect"] + class_names,
                        value="Auto Detect",
                        label="Target Class"
                    )
                    gradcam_method = gr.Radio(
                        choices=["Grad-CAM", "Grad-CAM++"],
                        value="Grad-CAM",
                        label="Method"
                    )
                    alpha = gr.Slider(0.0, 1.0, value=0.5, step=0.1, label="Heatmap Transparency")

                gradcam_btn = gr.Button("Generate Grad-CAM", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### Grad-CAM Results")
                gradcam_output = gr.Image(type="pil", label="Grad-CAM Visualization")
                gradcam_info = gr.Markdown("")

        # Wire the button to the Grad-CAM handler
        gradcam_btn.click(
            fn=generate_gradcam,
            inputs=[gradcam_input, target_class, gradcam_method, alpha],
            outputs=[gradcam_output, gradcam_info]
        )


def create_analysis_tab():
    """Tab 4: Statistical Analysis — charts built from the latest detections."""
    with gr.Tab("Analysis"):
        gr.Markdown("""
        ## Statistical Analysis
        Perform statistical analysis on detection results (run detection in Detection tab first).
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Chart Type")

                stat_type = gr.Radio(
                    choices=["Class Distribution", "Confidence Distribution", "ROI Size Distribution", "Summary Statistics"],
                    value="Summary Statistics",
                    label="Select Statistics Type"
                )

                generate_btn = gr.Button("Generate Statistics Chart", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### Statistics Results")
                stat_output = gr.Image(type="pil", label="Statistics Chart")
                stat_info = gr.Markdown("")

        # Wire the button to the statistics handler
        generate_btn.click(
            fn=generate_statistics,
            inputs=[stat_type],
            outputs=[stat_output, stat_info]
        )


def create_batch_tab():
    """Tab 5: Batch Processing — placeholder; no handlers wired yet."""
    with gr.Tab("Batch"):
        gr.Markdown("""
        ## Batch Processing
        Upload multiple images for batch detection.
        """)

        gr.Markdown("**Feature under development** - Phase 3 implementation")


def create_ui():
    """Assemble the complete Blocks app (header, system info, all tabs).

    Returns the gr.Blocks instance; the caller queues and launches it.
    """
    with gr.Blocks(
        title="PRISM",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1600px !important;
        }
        """
    ) as demo:
        gr.Markdown("""
        # PRISM Defect Detection System
        **P**rogressive **R**ecognition and **I**ntelligent **S**emantic **M**apping

        Complete version: Detection | Feature Visualization | Grad-CAM | Statistical Analysis
        """)

        # System Information (collapsed by default, refresh on demand)
        with gr.Accordion("System Information", open=False):
            system_info = gr.Markdown(get_system_info())
            refresh_btn = gr.Button("Refresh", size="sm")
            refresh_btn.click(fn=get_system_info, outputs=system_info)

        # Tabs (order here is the on-screen tab order)
        create_detection_tab()
        create_features_tab()
        create_gradcam_tab()
        create_analysis_tab()
        create_batch_tab()

        gr.Markdown("""
        ---
        ### Usage Instructions

        **Tab 1 - Detection**: Upload image for defect detection, optional three-image comparison
        **Tab 2 - Features**: Visualize OverLoCK and DINOv2 intermediate layer features
        **Tab 3 - Grad-CAM**: Generate class activation maps to understand model decisions
        **Tab 4 - Analysis**: Statistical analysis of detection results (run detection in Tab 1 first)
        **Tab 5 - Batch**: Batch process multiple images (under development)

        **Tip**: System supports multi-user concurrency, requests are automatically queued
        """)

    return demo


if __name__ == '__main__':
    # Load models/managers before building the UI (tabs read the globals)
    initialize_system()

    # Build and serve; queue(max_size=20) caps pending browser requests
    demo = create_ui()
    demo.queue(max_size=20)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
