"""
FastAPI Application for PaddleOCR Video Processing
"""
import os
import tempfile
import logging
from pathlib import Path
from typing import Optional
import json
import numpy as np

from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks, APIRouter
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn

from .ocr_processor import OCRProcessor
from .video_processor import VideoProcessor, sanitize_for_json
from .config import ALLOWED_VIDEO_FORMATS, MAX_VIDEO_SIZE_MB

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Initialize FastAPI app; interactive docs are served under /api/*.
app = FastAPI(
    title="PaddleOCR Video Processing API",
    description="API for performing OCR on video files using PaddleOCR and OpenVINO",
    version="1.0.0",
    docs_url="/api/docs",
    redoc_url="/api/redoc",
    openapi_url="/api/openapi.json"
)

# Add CORS middleware
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# maximally permissive; confirm whether production needs a concrete origin list.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create API router; every endpoint below is served under the /api prefix.
api_router = APIRouter(prefix="/api")

# Global OCR processor (initialized once)
# Both globals are populated by startup_event(); endpoints treat None as
# "models not ready" and answer 503 until startup completes.
ocr_processor = None
video_processor = None


@app.on_event("startup")
async def startup_event():
    """Initialize the OCR and video processors once at application startup.

    Populates the module-level ``ocr_processor`` and ``video_processor``
    globals that every endpoint depends on. Any initialization error is
    logged and re-raised so the server fails fast instead of serving 503s
    indefinitely.
    """
    # NOTE(review): @app.on_event is deprecated in recent FastAPI releases in
    # favor of lifespan handlers -- consider migrating when upgrading FastAPI.
    global ocr_processor, video_processor
    logger.info("Initializing OCR models...")
    try:
        ocr_processor = OCRProcessor()
        video_processor = VideoProcessor(ocr_processor)
        logger.info("Models initialized successfully!")
    except Exception as e:
        logger.error(f"Error initializing models: {str(e)}", exc_info=True)
        raise


@api_router.get("/")
async def root():
    """Describe the API and list the paths of all available endpoints."""
    endpoint_map = {
        "health": "/api/health",
        "process_video": "/api/process-video",
        "process_video_with_output": "/api/process-video-with-output",
        "process_video_text_only": "/api/process-video-text-only",
        "process_video_stream": "/api/process-video-stream",
        "process_image_stream": "/api/process-image-stream",
    }
    return {
        "message": "PaddleOCR Video Processing API",
        "version": "1.0.0",
        "endpoints": endpoint_map,
    }


@api_router.get("/health")
async def health_check():
    """Report service liveness and whether the OCR models have loaded."""
    models_ready = ocr_processor is not None
    return {"status": "healthy", "models_loaded": models_ready}


@api_router.post("/process-video")
async def process_video(
    file: UploadFile = File(...),
    sample_rate: int = 1
):
    """
    Process a video file and extract OCR results for each sampled frame.

    Args:
        file: Video file to process
        sample_rate: Process every Nth frame (default: 1 = all frames)

    Returns:
        JSON with OCR results for each frame

    Raises:
        HTTPException: 503 if models are not loaded, 400 for an unsupported
            format, 413 if the upload exceeds MAX_VIDEO_SIZE_MB, 500 on
            processing failure.
    """
    logger.info(f"Received request to process video: {file.filename}, sample_rate={sample_rate}")

    if ocr_processor is None or video_processor is None:
        logger.error("OCR models not initialized")
        raise HTTPException(status_code=503, detail="OCR models not initialized")

    # Use only the basename of the client-supplied filename so a crafted
    # name (e.g. "../../x.mp4") cannot write outside the temp directory.
    safe_name = Path(file.filename or "").name
    file_ext = Path(safe_name).suffix.lower()
    if file_ext not in ALLOWED_VIDEO_FORMATS:
        logger.warning(f"Unsupported video format: {file_ext}")
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported video format. Allowed formats: {', '.join(ALLOWED_VIDEO_FORMATS)}"
        )

    # Read the upload once and reuse the bytes for both the size check and
    # the temp-file write (previously the stream was read twice).
    file_content = await file.read()
    file_size_mb = len(file_content) / (1024 * 1024)

    logger.info(f"File size: {file_size_mb:.2f}MB")

    if file_size_mb > MAX_VIDEO_SIZE_MB:
        logger.warning(f"File too large: {file_size_mb:.2f}MB > {MAX_VIDEO_SIZE_MB}MB")
        raise HTTPException(
            status_code=413,
            detail=f"File too large. Maximum size: {MAX_VIDEO_SIZE_MB}MB"
        )

    # Save uploaded file temporarily.
    # TODO(review): concurrent uploads with the same filename share this path
    # and can collide; consider tempfile.mkstemp if that becomes a scenario.
    temp_video_path = os.path.join(tempfile.gettempdir(), safe_name)

    try:
        logger.info(f"Saving file to: {temp_video_path}")
        with open(temp_video_path, "wb") as f:
            f.write(file_content)

        logger.info("Starting video processing...")
        results = video_processor.process_video(temp_video_path, sample_rate=sample_rate)

        logger.info("Video processing completed successfully")
        # Sanitize results (NaN/Inf, numpy scalar types) so they serialize.
        results = sanitize_for_json(results)
        return JSONResponse(content=results)

    except Exception as e:
        logger.error(f"Error processing video: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error processing video: {str(e)}")

    finally:
        # Clean up the temp file regardless of outcome.
        if os.path.exists(temp_video_path):
            logger.info(f"Cleaning up temp file: {temp_video_path}")
            os.remove(temp_video_path)


@api_router.post("/process-video-with-output")
async def process_video_with_output(
    file: UploadFile = File(...),
    sample_rate: int = 1,
    background_tasks: BackgroundTasks = BackgroundTasks()
):
    """
    Process video and return an output video with the OCR results drawn on it.

    Args:
        file: Video file to process
        sample_rate: Process every Nth frame (default: 1 = all frames)
        background_tasks: Injected by FastAPI; used to delete the temp files
            after the response has been sent.

    Returns:
        Output video file (mp4) with OCR results drawn

    Raises:
        HTTPException: 503 if models are not loaded, 400 for an unsupported
            format, 413 if the upload exceeds MAX_VIDEO_SIZE_MB, 500 on
            processing failure.
    """
    # NOTE(review): the BackgroundTasks() default is effectively dead code --
    # FastAPI injects a per-request instance based on the annotation; the
    # default is kept only for direct-call signature compatibility.
    logger.info(f"Received request to process video with output: {file.filename}, sample_rate={sample_rate}")

    if ocr_processor is None or video_processor is None:
        logger.error("OCR models not initialized")
        raise HTTPException(status_code=503, detail="OCR models not initialized")

    # Use only the basename of the client-supplied filename so a crafted
    # name (e.g. "../../x.mp4") cannot write outside the temp directory.
    safe_name = Path(file.filename or "").name
    file_ext = Path(safe_name).suffix.lower()
    if file_ext not in ALLOWED_VIDEO_FORMATS:
        logger.warning(f"Unsupported video format: {file_ext}")
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported video format. Allowed formats: {', '.join(ALLOWED_VIDEO_FORMATS)}"
        )

    # Read the upload once and reuse the bytes for both the size check and
    # the temp-file write (previously the stream was read twice).
    file_content = await file.read()
    file_size_mb = len(file_content) / (1024 * 1024)

    logger.info(f"File size: {file_size_mb:.2f}MB")

    if file_size_mb > MAX_VIDEO_SIZE_MB:
        logger.warning(f"File too large: {file_size_mb:.2f}MB > {MAX_VIDEO_SIZE_MB}MB")
        raise HTTPException(
            status_code=413,
            detail=f"File too large. Maximum size: {MAX_VIDEO_SIZE_MB}MB"
        )

    # Temp paths for the saved input copy and the rendered output video.
    temp_dir = tempfile.gettempdir()
    temp_video_path = os.path.join(temp_dir, f"input_{safe_name}")
    output_video_path = os.path.join(temp_dir, f"output_{safe_name}")

    try:
        logger.info(f"Saving input file to: {temp_video_path}")
        with open(temp_video_path, "wb") as f:
            f.write(file_content)

        logger.info("Starting video processing with output...")
        video_processor.process_video_with_output(
            temp_video_path, output_video_path, sample_rate=sample_rate
        )

        logger.info("Video processing completed successfully")

        # Delete both temp files only after the response has been streamed.
        background_tasks.add_task(cleanup_files, temp_video_path, output_video_path)

        return FileResponse(
            output_video_path,
            media_type="video/mp4",
            filename=f"ocr_{safe_name}"
        )

    except Exception as e:
        logger.error(f"Error processing video: {str(e)}", exc_info=True)
        # Best-effort cleanup on failure; cleanup_files never raises.
        cleanup_files(temp_video_path, output_video_path)
        raise HTTPException(status_code=500, detail=f"Error processing video: {str(e)}")


@api_router.post("/process-video-text-only")
async def process_video_text_only(
    file: UploadFile = File(...),
    sample_rate: int = 1,
    min_score: float = 0.0
):
    """
    Process video and extract only recognized text segments.
    Returns a clean list of text without coordinates, scores, or other metadata.

    Args:
        file: Video file to process
        sample_rate: Process every Nth frame (default: 1 = all frames)
        min_score: Minimum confidence score to include text (0.0 to 1.0, default: 0.0)

    Returns:
        JSON with only recognized text segments organized by frame

    Raises:
        HTTPException: 503 if models are not loaded, 400 for an unsupported
            format or invalid min_score, 413 if the upload exceeds
            MAX_VIDEO_SIZE_MB, 500 on processing failure.
    """
    logger.info(f"Received request to process video (text-only): {file.filename}, sample_rate={sample_rate}, min_score={min_score}")

    if ocr_processor is None or video_processor is None:
        logger.error("OCR models not initialized")
        raise HTTPException(status_code=503, detail="OCR models not initialized")

    # Validate cheap parameters before touching the (potentially large) body.
    if not (0.0 <= min_score <= 1.0):
        logger.warning(f"Invalid min_score: {min_score}")
        raise HTTPException(
            status_code=400,
            detail="min_score must be between 0.0 and 1.0"
        )

    # Use only the basename of the client-supplied filename so a crafted
    # name (e.g. "../../x.mp4") cannot write outside the temp directory.
    safe_name = Path(file.filename or "").name
    file_ext = Path(safe_name).suffix.lower()
    if file_ext not in ALLOWED_VIDEO_FORMATS:
        logger.warning(f"Unsupported video format: {file_ext}")
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported video format. Allowed formats: {', '.join(ALLOWED_VIDEO_FORMATS)}"
        )

    # Read the upload once and reuse the bytes for both the size check and
    # the temp-file write (previously the stream was read twice).
    file_content = await file.read()
    file_size_mb = len(file_content) / (1024 * 1024)

    logger.info(f"File size: {file_size_mb:.2f}MB")

    if file_size_mb > MAX_VIDEO_SIZE_MB:
        logger.warning(f"File too large: {file_size_mb:.2f}MB > {MAX_VIDEO_SIZE_MB}MB")
        raise HTTPException(
            status_code=413,
            detail=f"File too large. Maximum size: {MAX_VIDEO_SIZE_MB}MB"
        )

    # Save uploaded file temporarily
    temp_video_path = os.path.join(tempfile.gettempdir(), safe_name)

    try:
        logger.info(f"Saving file to: {temp_video_path}")
        with open(temp_video_path, "wb") as f:
            f.write(file_content)

        logger.info("Starting text-only video processing...")
        results = video_processor.process_video_text_only(
            temp_video_path, sample_rate=sample_rate, min_score=min_score
        )

        logger.info("Text-only video processing completed successfully")
        # Sanitize results (NaN/Inf, numpy scalar types) so they serialize.
        results = sanitize_for_json(results)
        return JSONResponse(content=results)

    except Exception as e:
        logger.error(f"Error processing video: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error processing video: {str(e)}")

    finally:
        # Clean up the temp file regardless of outcome.
        if os.path.exists(temp_video_path):
            logger.info(f"Cleaning up temp file: {temp_video_path}")
            os.remove(temp_video_path)


@api_router.post("/process-video-stream")
async def process_video_stream(
    file: UploadFile = File(...),
    sample_rate: int = 1,
    min_score: float = 0.0
):
    """
    Process video and stream OCR results in real-time using Server-Sent Events (SSE).
    Results are streamed as they are processed, allowing real-time monitoring.

    Args:
        file: Video file to process
        sample_rate: Process every Nth frame (default: 1 = all frames)
        min_score: Minimum confidence score to include text (0.0 to 1.0, default: 0.0)

    Returns:
        Server-Sent Events stream with OCR results for each frame

    Raises:
        HTTPException: 503 if models are not loaded, 400 for an unsupported
            format or invalid min_score, 413 if the upload exceeds
            MAX_VIDEO_SIZE_MB, 500 if setup fails before streaming starts.
    """
    logger.info(f"Received request to stream video: {file.filename}, sample_rate={sample_rate}, min_score={min_score}")

    if ocr_processor is None or video_processor is None:
        logger.error("OCR models not initialized")
        raise HTTPException(status_code=503, detail="OCR models not initialized")

    # Validate cheap parameters before touching the (potentially large) body.
    if not (0.0 <= min_score <= 1.0):
        logger.warning(f"Invalid min_score: {min_score}")
        raise HTTPException(
            status_code=400,
            detail="min_score must be between 0.0 and 1.0"
        )

    # Use only the basename of the client-supplied filename so a crafted
    # name (e.g. "../../x.mp4") cannot write outside the temp directory.
    safe_name = Path(file.filename or "").name
    file_ext = Path(safe_name).suffix.lower()
    if file_ext not in ALLOWED_VIDEO_FORMATS:
        logger.warning(f"Unsupported video format: {file_ext}")
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported video format. Allowed formats: {', '.join(ALLOWED_VIDEO_FORMATS)}"
        )

    # Read the upload once and reuse the bytes for both the size check and
    # the temp-file write (previously the stream was read twice).
    file_content = await file.read()
    file_size_mb = len(file_content) / (1024 * 1024)

    logger.info(f"File size: {file_size_mb:.2f}MB")

    if file_size_mb > MAX_VIDEO_SIZE_MB:
        logger.warning(f"File too large: {file_size_mb:.2f}MB > {MAX_VIDEO_SIZE_MB}MB")
        raise HTTPException(
            status_code=413,
            detail=f"File too large. Maximum size: {MAX_VIDEO_SIZE_MB}MB"
        )

    # Save uploaded file temporarily
    temp_video_path = os.path.join(tempfile.gettempdir(), safe_name)

    try:
        logger.info(f"Saving file to: {temp_video_path}")
        with open(temp_video_path, "wb") as f:
            f.write(file_content)

        logger.info("Starting streaming video processing...")

        def event_generator():
            """Relay processor events to the client, then remove the temp file."""
            try:
                yield from video_processor.process_video_stream(
                    temp_video_path, sample_rate=sample_rate, min_score=min_score
                )
            finally:
                # Runs when the stream completes or the client disconnects.
                if os.path.exists(temp_video_path):
                    logger.info(f"Cleaning up temp file: {temp_video_path}")
                    os.remove(temp_video_path)

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                # Disable proxy buffering (nginx) so events arrive immediately.
                "X-Accel-Buffering": "no"
            }
        )

    except Exception as e:
        logger.error(f"Error processing video stream: {str(e)}", exc_info=True)
        # Clean up on setup failure (the generator's finally never ran).
        if os.path.exists(temp_video_path):
            logger.info(f"Cleaning up temp file: {temp_video_path}")
            os.remove(temp_video_path)
        raise HTTPException(status_code=500, detail=f"Error processing video: {str(e)}")


@api_router.post("/process-image-stream")
async def process_image_stream(
    file: UploadFile = File(...),
    min_score: float = 0.0
):
    """
    Process an image and stream OCR results using Server-Sent Events (SSE).
    This endpoint is optimized for camera captures and single images.

    Args:
        file: Image file to process
        min_score: Minimum confidence score to include text (0.0 to 1.0, default: 0.0)

    Returns:
        Server-Sent Events stream with metadata, frame, and complete events
        (or an in-band error event if processing fails mid-stream)

    Raises:
        HTTPException: 503 if models are not loaded, 400 for an invalid
            min_score, 500 if setup fails before streaming starts.
    """
    logger.info(f"Received request to stream image: {file.filename}, min_score={min_score}")

    if ocr_processor is None:
        logger.error("OCR models not initialized")
        raise HTTPException(status_code=503, detail="OCR models not initialized")

    # Validate min_score parameter
    if not (0.0 <= min_score <= 1.0):
        logger.warning(f"Invalid min_score: {min_score}")
        raise HTTPException(
            status_code=400,
            detail="min_score must be between 0.0 and 1.0"
        )

    # Use only the basename of the client-supplied filename so a crafted
    # name (e.g. "../../x.png") cannot write outside the temp directory.
    # NOTE(review): unlike the video endpoints, no format/size validation is
    # performed here -- confirm whether image uploads should be bounded too.
    safe_name = Path(file.filename or "").name
    temp_image_path = os.path.join(tempfile.gettempdir(), safe_name)

    try:
        logger.info(f"Saving image to: {temp_image_path}")
        with open(temp_image_path, "wb") as f:
            f.write(await file.read())

        logger.info("Starting streaming image processing...")

        def event_generator():
            """Run OCR on the saved image and yield SSE-formatted events."""
            try:
                # Local imports: cv2 is only needed by this endpoint and is
                # not imported at module level.
                import cv2
                import time

                start_time = time.time()

                image = cv2.imread(temp_image_path)
                if image is None:
                    raise ValueError(f"Cannot read image: {temp_image_path}")

                # 1) metadata event: original upload name and decoded shape
                metadata_event = {
                    "type": "metadata",
                    "filename": file.filename,
                    "image_shape": list(image.shape)
                }
                yield f"data: {json.dumps(metadata_event)}\n\n"

                # 2) frame event: recognized texts at/above the threshold
                dt_boxes, txts, scores = ocr_processor.process_frame(image)

                frame_texts = []
                for txt, score in zip(txts, scores):
                    score_float = float(score)
                    # Normalize non-finite scores so json.dumps stays valid.
                    if np.isnan(score_float):
                        score_float = 0.0
                    elif np.isinf(score_float):
                        score_float = 1.0

                    if score_float >= min_score:
                        frame_texts.append({
                            "text": txt,
                            "confidence": score_float
                        })

                frame_result = {
                    "type": "frame",
                    "texts": frame_texts,
                    "text_count": len(frame_texts)
                }
                yield f"data: {json.dumps(frame_result)}\n\n"

                # 3) complete event with total wall-clock processing time
                complete_event = {
                    "type": "complete",
                    "total_texts": len(frame_texts),
                    "processing_time": time.time() - start_time
                }
                yield f"data: {json.dumps(complete_event)}\n\n"

            except Exception as e:
                # The HTTP status is already sent once streaming starts, so
                # failures are reported in-band as an SSE "error" event.
                logger.error(f"Error in image stream processing: {str(e)}", exc_info=True)
                error_event = {
                    "type": "error",
                    "message": str(e)
                }
                yield f"data: {json.dumps(error_event)}\n\n"
            finally:
                # Runs when the stream completes or the client disconnects.
                if os.path.exists(temp_image_path):
                    logger.info(f"Cleaning up temp image file: {temp_image_path}")
                    os.remove(temp_image_path)

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                # Disable proxy buffering (nginx) so events arrive immediately.
                "X-Accel-Buffering": "no"
            }
        )

    except Exception as e:
        logger.error(f"Error processing image stream: {str(e)}", exc_info=True)
        # Clean up on setup failure (the generator's finally never ran).
        if os.path.exists(temp_image_path):
            logger.info(f"Cleaning up temp image file: {temp_image_path}")
            os.remove(temp_image_path)
        raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")


# Register API router
app.include_router(api_router)

# Serve index.html at root for SPA support
# The web/ directory is expected to sit next to this package's parent dir.
web_dir = Path(__file__).parent.parent / "web"
if web_dir.exists():
    logger.info(f"Web frontend directory found at: {web_dir}")

    # Serve index.html at root (hidden from the OpenAPI schema)
    @app.get("/", include_in_schema=False)
    async def serve_index():
        """Serve the SPA's index.html at /, or a JSON notice if missing."""
        index_path = web_dir / "index.html"
        if index_path.exists():
            return FileResponse(index_path, media_type="text/html")
        return {"message": "Web frontend not found"}

    # Mount static files for web frontend; assets are served under /web/<path>.
    app.mount("/web", StaticFiles(directory=str(web_dir)), name="web")
else:
    # No frontend is served; the API endpoints still work.
    logger.warning(f"Web directory not found at: {web_dir}")


def cleanup_files(*file_paths):
    """Remove temporary files, silently skipping ones that are already gone.

    Used both as a FastAPI background task (after a response is sent) and
    for inline cleanup; it is best-effort and never raises.

    Args:
        *file_paths: Paths of files to delete.
    """
    for file_path in file_paths:
        try:
            if os.path.exists(file_path):
                os.remove(file_path)
        except OSError as e:
            # Log via the module logger (was print) so failures appear in
            # the application logs; cleanup stays best-effort.
            logger.warning(f"Error cleaning up {file_path}: {str(e)}")


if __name__ == "__main__":
    # Development entry point: run the app directly with uvicorn on all
    # interfaces. In production this module is typically launched by an
    # external ASGI runner instead.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        log_level="info"
    )

