import base64
import io
import time
from contextlib import asynccontextmanager
from pathlib import Path

import torch
from fastapi import FastAPI, File, HTTPException, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from PIL import Image
from pydantic import BaseModel
from pydantic.alias_generators import to_camel
from ultralytics import YOLO

# Global model handles, populated once at startup by load_models() via the lifespan hook.
person_model = None  # YOLO model for generic COCO person detection (required)
sleep_model = None  # YOLO model for sleep detection; may remain None (heuristic fallback)

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the CV models before serving requests."""
    # Startup: try to bring up both detection models; keep serving either way.
    if not load_models():
        print("Warning: Models could not be loaded on startup")
    yield
    # Shutdown: no resources to release at the moment.

# FastAPI application instance; root_path matches the reverse-proxy prefix
# so generated docs/OpenAPI URLs resolve correctly behind the gateway.
app = FastAPI(
    title="Duty Monitor CV Service",
    description="Image recognition service for detecting sleeping persons",
    version="1.0.0",
    root_path="/duty-monitor/cv/api",  # Context path for the API
    lifespan=lifespan
)

# Add CORS middleware
# NOTE(review): wildcard allow_origins together with allow_credentials=True is
# disallowed by the CORS spec (browsers refuse `Access-Control-Allow-Origin: *`
# on credentialed requests) — consider listing explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Add timing middleware
@app.middleware("http")
async def timing_middleware(request: Request, call_next):
    """Log wall-clock processing time for every HTTP request."""
    started = time.time()
    response = await call_next(request)
    elapsed = time.time() - started

    print(f"Request: {request.method} {request.url.path} - Time: {elapsed:.4f}s")
    return response


class ResponseBody(BaseModel):
    """Generic API envelope: code (0 = success), message, optional payload dict."""
    # Serialize field names as camelCase; still accept snake_case on input.
    model_config = {"alias_generator": to_camel, "populate_by_name": True}
    
    code: int  # 0 on success, HTTP-style error code otherwise
    msg: str  # human-readable status message
    data: dict | None = None  # endpoint-specific payload, already dumped by_alias

class Detection(BaseModel):
    """Payload for /detect: detection flags, confidences, and annotated image."""
    # Serialize field names as camelCase; still accept snake_case on input.
    model_config = {"alias_generator": to_camel, "populate_by_name": True}
    
    has_person: bool  # at least one person found by the person model
    has_sleeping_person: bool  # at least one sleeping person found by the sleep model
    person_confidence: float  # max person confidence (0.0 when none)
    sleeping_person_confidence: float  # max sleeping-person confidence (0.0 when none)
    image_base64: str  # base64-encoded JPEG with detection annotations drawn
    message: str  # human-readable summary of the detection result

class Health(BaseModel):
    """Payload for /health: overall status plus model-availability flag."""
    # Serialize field names as camelCase; still accept snake_case on input.
    model_config = {"alias_generator": to_camel, "populate_by_name": True}
    
    status: str  # "healthy" or "unhealthy"
    model_loaded: bool  # True when the required person model is loaded

def load_models():
    """Load both detection models; True only when both loaders succeed.

    Both loaders are always invoked (no short-circuit) so each gets a
    chance to initialize and log its own outcome.
    """
    global person_model, sleep_model

    person_ok = load_person_model()
    sleep_ok = load_sleep_model()

    return person_ok and sleep_ok

def load_person_model():
    """Load the YOLOv11n weights used for generic person detection.

    Returns:
        True when the model loads; False when weights are missing or
        loading raises (the error is printed, not propagated).
    """
    global person_model
    weights_path = Path("weights/yolo11n.pt")
    try:
        if not weights_path.exists():
            raise FileNotFoundError("Person detection model weights not found")
        person_model = YOLO(str(weights_path))
    except Exception as e:
        print(f"Error loading person detection model: {e}")
        return False
    print(f"Person detection model loaded successfully from {weights_path}")
    return True

def load_sleep_model():
    """Load the custom sleep detection model if weights are present.

    Tries PyTorch weights first, then ONNX; with neither present the model
    is left as None (pose-analysis fallback). Always returns True because
    person-only operation is still acceptable, even after a load error.
    """
    global sleep_model
    try:
        candidates = (Path("weights/best.pt"), Path("weights/best.onnx"))
        weights_path = next((p for p in candidates if p.exists()), None)
        if weights_path is None:
            print("No custom sleep detection model found, will use person model with pose analysis")
            sleep_model = None
            return True

        sleep_model = YOLO(str(weights_path))
        print(f"Sleep detection model loaded successfully from {weights_path}")
        return True
    except Exception as e:
        print(f"Error loading sleep detection model: {e}")
        # Degrade gracefully: person detection alone still works.
        sleep_model = None
        return True

def get_device():
    """Pick the best available inference device.

    Preference order: Apple MPS, then CUDA device "0", then CPU.
    The CUDA case returns the string "0" (device index) as expected by
    Ultralytics' `device=` argument.
    """
    if torch.backends.mps.is_available():
        return "mps"
    if torch.cuda.is_available():
        return "0"
    return "cpu"

def process_image(image_bytes: bytes) -> tuple[bool, bool, float, float, str]:
    """
    Detect persons and sleeping persons in an image using two separate models.

    Sleep detection only runs when at least one person was found first,
    saving an inference pass on empty scenes.

    Args:
        image_bytes: Raw encoded image data in any PIL-readable format.

    Returns:
        (has_person, has_sleeping_person, person_confidence,
         sleeping_person_confidence, annotated_image_base64)

    Raises:
        HTTPException: 500 when the person model is not loaded or any
            processing step fails.
    """
    if person_model is None:
        raise HTTPException(status_code=500, detail="Person detection model not loaded")
    
    try:
        # Decode and normalize to RGB (e.g. PNG may be RGBA or palette-based).
        image = Image.open(io.BytesIO(image_bytes))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        
        device = get_device()
        
        # Step 1: person detection gates the second inference pass.
        has_person, max_person_confidence, person_results = detect_persons(image, device)
        
        # Step 2: only run sleep detection if persons are present.
        has_sleeping_person = False
        max_sleeping_person_confidence = 0.0
        sleep_results = None
        if has_person:
            has_sleeping_person, max_sleeping_person_confidence, sleep_results = detect_sleeping_persons(image, device)
        
        # Prefer the sleep-model annotations when available, otherwise person-model ones.
        results_for_plot = sleep_results if sleep_results is not None else person_results
        annotated_image = results_for_plot[0].plot()
        
        # BUGFIX: Ultralytics' plot() returns a BGR array (OpenCV convention);
        # reverse the channel axis so PIL encodes the colors correctly.
        pil_image = Image.fromarray(annotated_image[..., ::-1])
        
        # Encode annotated frame as base64 JPEG for the JSON response.
        buffer = io.BytesIO()
        pil_image.save(buffer, format="JPEG")
        img_base64 = base64.b64encode(buffer.getvalue()).decode()
        
        return has_person, has_sleeping_person, max_person_confidence, max_sleeping_person_confidence, img_base64
        
    except Exception as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}") from e

def detect_persons(image, device):
    """Run the person model over `image`.

    Returns:
        (found, max_confidence, raw_results) — `found` is True when any
        COCO class-0 (person) box is detected; confidence is 0.0 otherwise.
    """
    found = False
    best_conf = 0.0

    results = person_model.predict(
        source=image,
        conf=0.25,  # permissive threshold so fewer people are missed
        device=device,
        save=False,
        verbose=False
    )

    for result in results:
        boxes = result.boxes
        if boxes is None or len(boxes) == 0:
            continue
        for box in boxes:
            # Class 0 in the COCO label set is "person".
            if int(box.cls[0]) == 0:
                found = True
                best_conf = max(best_conf, float(box.conf[0]))

    return found, best_conf, results

def detect_sleeping_persons(image, device):
    """Run the sleep model over `image`.

    Returns:
        (found, max_confidence, raw_results) — `raw_results` is None when
        no sleep model is loaded (heuristic fallback path).
    """
    found = False
    best_conf = 0.0

    if sleep_model is None:
        # Fallback: no dedicated model was loaded at startup.
        print("No sleep detection model available, using heuristic approach")
        return found, best_conf, None

    results = sleep_model.predict(
        source=image,
        conf=0.7,
        device=device,
        save=False,
        verbose=False
    )

    for result in results:
        boxes = result.boxes
        if boxes is None or len(boxes) == 0:
            continue
        class_names = sleep_model.names
        for box in boxes:
            class_id = int(box.cls[0])
            class_name = class_names.get(class_id, "unknown")
            # A "sleep*" class name (or class id 1 by convention of the
            # custom model) counts as a sleeping person.
            if "sleep" in class_name.lower() or class_id == 1:
                found = True
                best_conf = max(best_conf, float(box.conf[0]))

    return found, best_conf, results


@app.get("/health", response_model=ResponseBody)
async def health_check():
    """Report service health; person detection is required, sleep detection optional."""
    person_ok = person_model is not None
    sleep_ok = sleep_model is not None
    is_healthy = person_ok  # the minimum viable service is person detection alone

    if not person_ok:
        status_msg = "Service is unhealthy - person detection model not loaded"
    elif not sleep_ok:
        status_msg = "Service is partially healthy - person detection works, sleep detection unavailable"
    else:
        status_msg = "Service is healthy"

    health = Health(
        status="healthy" if is_healthy else "unhealthy",
        model_loaded=is_healthy
    )
    return ResponseBody(
        code=0 if is_healthy else 503,
        msg=status_msg,
        data=health.model_dump(by_alias=True)
    )

@app.post("/detect", response_model=ResponseBody)
async def detect_sleeping_person(file: UploadFile = File(...)):
    """Detect a sleeping person in an uploaded image and return an annotated result."""
    # Guard: reject non-image uploads (missing or wrong content type).
    content_type = file.content_type or ""
    if not content_type.startswith('image/'):
        return ResponseBody(
            code=400,
            msg="Invalid file type. File must be an image.",
            data=None
        )

    # Guard: enforce a 10MB cap when the upload size is known.
    max_bytes = 10 * 1024 * 1024
    if file.size and file.size > max_bytes:
        return ResponseBody(
            code=400,
            msg="File size too large. Maximum size is 10MB.",
            data=None
        )

    try:
        payload = await file.read()

        (has_person, has_sleeping_person, person_confidence,
         sleeping_person_confidence, img_base64) = process_image(payload)

        # Build a human-readable summary of the outcome.
        if not has_person:
            message = "No person detected in the image - sleep detection skipped"
        elif has_sleeping_person:
            message = f"Person detected with {person_confidence:.2f} confidence, sleeping person detected with {sleeping_person_confidence:.2f} confidence"
        else:
            message = f"Person detected with {person_confidence:.2f} confidence, but no sleeping person detected"

        detection = Detection(
            has_person=has_person,
            has_sleeping_person=has_sleeping_person,
            person_confidence=person_confidence,
            sleeping_person_confidence=sleeping_person_confidence,
            image_base64=img_base64,
            message=message
        )
        return ResponseBody(
            code=0,
            msg="Detection completed successfully",
            data=detection.model_dump(by_alias=True)
        )

    except Exception as e:
        return ResponseBody(
            code=500,
            msg=f"Internal server error: {str(e)}",
            data=None
        )

if __name__ == "__main__":
    import uvicorn
    
    # Dev entry point; reload=True is for local development only.
    uvicorn.run("app:app", host="0.0.0.0", port=9011, reload=True)
