import io
import uuid
import time
import logging
from typing import Optional, Tuple, Dict, Any
from PIL import Image
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    DPMSolverMultistepScheduler
)
import numpy as np
import cv2
from app.core.config import settings

# Module-level logger named after this module, per logging best practice.
logger = logging.getLogger(__name__)


class ColorizationService:
    """Service for AI-powered image colorization with production-grade error handling.

    Loads a Stable Diffusion + ControlNet pipeline once at construction time and
    exposes an async entry point (``colorize``) plus a blocking wrapper
    (``colorize_sync``) intended for Celery workers.
    """

    def __init__(self):
        # Resolve the device first so dtype/offloading decisions in
        # _load_model can depend on it.
        self.device = self._get_device()
        self.pipeline = None
        self.controlnet = None
        self._model_loaded = False
        # NOTE: loading happens eagerly here, so constructing the service is slow.
        self._load_model()

    def _get_device(self) -> str:
        """Determine the best available device, falling back to CPU on any error.

        Returns:
            One of "cuda", "mps", "cpu", or whatever `settings.device` forces.
        """
        try:
            # An explicit configuration value wins over auto-detection.
            if settings.device != "auto":
                return settings.device

            if torch.cuda.is_available() and torch.cuda.device_count() > 0:
                logger.info(f"Found {torch.cuda.device_count()} CUDA devices")
                return "cuda"
            if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
                logger.info("Using Apple MPS device")
                return "mps"
            # Bug fix: the original fell off the end (implicitly returning None)
            # when CUDA was "available" but reported zero devices; always end
            # with an explicit CPU fallback.
            logger.info("Using CPU device")
            return "cpu"
        except Exception as e:
            logger.warning(f"Error detecting device, falling back to CPU: {e}")
            return "cpu"

    def _load_model(self):
        """Load the ControlNet + Stable Diffusion pipeline with error handling.

        Raises:
            RuntimeError: if any stage of model loading fails.
        """
        try:
            logger.info(f"Loading model on device: {self.device}")

            # fp16 only pays off on CUDA; CPU/MPS get fp32 weights.
            dtype = torch.float16 if self.device == "cuda" else torch.float32

            # Load ControlNet for structural control over the generated image.
            self.controlnet = ControlNetModel.from_pretrained(
                settings.controlnet_model,
                torch_dtype=dtype,
                cache_dir=settings.model_path,
                local_files_only=False
            )

            # Main text-to-image pipeline; safety checker disabled deliberately.
            self.pipeline = StableDiffusionControlNetPipeline.from_pretrained(
                settings.model_name,
                controlnet=self.controlnet,
                torch_dtype=dtype,
                cache_dir=settings.model_path,
                local_files_only=False,
                safety_checker=None,
                requires_safety_checker=False
            )

            # DPM-Solver++ multistep reaches good quality in fewer steps.
            self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
                self.pipeline.scheduler.config
            )

            # Optional memory optimization: xFormers attention, if installed.
            if hasattr(self.pipeline, "enable_xformers_memory_efficient_attention"):
                try:
                    self.pipeline.enable_xformers_memory_efficient_attention()
                    logger.info("Enabled xFormers memory efficient attention")
                except Exception as e:
                    logger.warning(f"Could not enable xFormers: {e}")

            # Offload idle submodules to CPU so low-VRAM GPUs can run the model.
            if self.device == "cuda":
                try:
                    self.pipeline.enable_model_cpu_offload()
                    logger.info("Enabled model CPU offloading")
                except Exception as e:
                    logger.warning(f"Could not enable CPU offloading: {e}")

            # Optional torch.compile of the UNet (the inference hot path).
            if settings.torch_compile and self.device == "cuda":
                try:
                    self.pipeline.unet = torch.compile(
                        self.pipeline.unet,
                        mode=settings.compile_mode,
                        fullgraph=True
                    )
                    logger.info("Compiled model with torch.compile")
                except Exception as e:
                    logger.warning(f"Could not compile model: {e}")

            self._model_loaded = True
            logger.info("Model loaded successfully")

        except Exception as e:
            logger.error(f"Failed to load model: {str(e)}")
            self._model_loaded = False
            # Chain the original exception so the root cause isn't lost.
            raise RuntimeError(f"Model loading failed: {str(e)}") from e

    def is_model_loaded(self) -> bool:
        """Return True when the pipeline is loaded and ready for inference."""
        return self._model_loaded and self.pipeline is not None

    def preprocess_image(self, image: Image.Image) -> Image.Image:
        """Validate and normalize an input image for the pipeline.

        Flattens transparency onto white, rejects images under 64x64,
        downscales anything above 2048x2048 and upscales images below 512
        on a side (diffusion models work best at >= 512px).

        Raises:
            ValueError: for a missing or too-small image.
        """
        try:
            # `not image` is unreliable on PIL images; test for None explicitly.
            if image is None:
                raise ValueError("No image provided")

            if image.mode not in ["RGB", "RGBA"]:
                image = image.convert("RGB")

            # Composite any alpha channel over a white background.
            if image.mode == "RGBA":
                background = Image.new("RGB", image.size, (255, 255, 255))
                background.paste(image, mask=image.split()[-1])
                image = background

            width, height = image.size
            if width < 64 or height < 64:
                raise ValueError("Image too small, minimum 64x64 pixels")

            if width > 2048 or height > 2048:
                logger.warning("Image too large, resizing to 2048x2048")
                image.thumbnail((2048, 2048), Image.Resampling.LANCZOS)
                # Bug fix: thumbnail() resizes in place, so refresh the cached
                # dimensions before the upscale check below (the original used
                # the stale pre-resize values).
                width, height = image.size

            if width < 512 or height < 512:
                scale = max(512 / width, 512 / height)
                image = image.resize(
                    (int(width * scale), int(height * scale)),
                    Image.Resampling.LANCZOS
                )

            return image

        except Exception as e:
            logger.error(f"Image preprocessing failed: {str(e)}")
            raise ValueError(f"Invalid image: {str(e)}") from e

    def generate_canny_image(self, image: Image.Image) -> Image.Image:
        """Generate a Canny edge map for ControlNet conditioning.

        Raises:
            RuntimeError: if OpenCV edge detection fails.
        """
        try:
            img_array = np.array(image)

            # Grayscale directly from RGB; the original RGB->BGR->GRAY round
            # trip produces identical luma weights.
            if img_array.ndim == 3:
                gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
            else:
                gray = img_array

            # Blur first so Canny responds to structure rather than noise.
            blurred = cv2.GaussianBlur(gray, (5, 5), 0)
            canny = cv2.Canny(blurred, 50, 150)

            return Image.fromarray(canny, mode='L')

        except Exception as e:
            logger.error(f"Canny edge detection failed: {str(e)}")
            raise RuntimeError(f"Edge detection failed: {str(e)}") from e

    def validate_parameters(self, **kwargs) -> Dict[str, Any]:
        """Validate and sanitize generation parameters.

        Unknown styles and non-numeric values fall back to their defaults
        (matching how a malformed seed was already handled) and numeric values
        are clamped into safe ranges, so bad request input never aborts a job.

        Returns:
            Dict with keys: style, strength, guidance_scale,
            num_inference_steps, seed.
        """
        validated = {}

        # Style must be one of the known presets.
        style = kwargs.get('style', 'anime')
        if style not in ('anime', 'realistic', 'cartoon', 'sketch', 'watercolor'):
            style = 'anime'
        validated['style'] = style

        def _clamped(name, default, lower, upper, cast):
            # Fall back to the default on non-numeric input, then clamp.
            try:
                value = cast(kwargs.get(name, default))
            except (ValueError, TypeError):
                value = default
            return max(lower, min(upper, value))

        validated['strength'] = _clamped('strength', 0.7, 0.1, 1.0, float)
        validated['guidance_scale'] = _clamped('guidance_scale', 7.5, 1.0, 20.0, float)
        validated['num_inference_steps'] = _clamped('num_inference_steps', 20, 10, 50, int)

        # Seed: any non-negative int; anything else means "random".
        seed = kwargs.get('seed')
        if seed is not None:
            try:
                seed = int(seed)
                if seed < 0:
                    seed = None
            except (ValueError, TypeError):
                seed = None
        validated['seed'] = seed

        return validated

    async def colorize(
        self,
        image: Image.Image,
        style: str = "anime",
        strength: float = 0.7,
        guidance_scale: float = 7.5,
        num_inference_steps: int = 20,
        seed: Optional[int] = None
    ) -> Tuple[Image.Image, Dict[str, Any]]:
        """
        Colorize a manga sketch with comprehensive error handling

        Args:
            image: Input manga sketch
            style: Colorization style
            strength: How much to modify the original (0.1-1.0); recorded in
                metadata but not consumed by the text-to-image pipeline
            guidance_scale: How closely to follow the prompt
            num_inference_steps: Number of denoising steps
            seed: Random seed for reproducibility

        Returns:
            Tuple of (colored_image, metadata)

        Raises:
            RuntimeError: if the model is not loaded or generation fails.
        """
        if not self.is_model_loaded():
            raise RuntimeError("Model not loaded")

        start_time = time.time()

        try:
            # Sanitize every user-supplied knob before touching the pipeline.
            params = self.validate_parameters(
                style=style,
                strength=strength,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                seed=seed
            )

            processed_image = self.preprocess_image(image)
            canny_image = self.generate_canny_image(processed_image)

            # Seed the global RNGs as well, for reproducibility of any noise
            # drawn outside the explicit generator below.
            if params['seed'] is not None:
                torch.manual_seed(params['seed'])
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(params['seed'])

            style_prompts = {
                "anime": "anime style, vibrant colors, detailed manga illustration, high quality",
                "realistic": "realistic colors, natural lighting, photographic quality",
                "cartoon": "cartoon style, bright colors, cel-shaded, clean lines",
                "sketch": "colored sketch, artistic style, watercolor effect",
                "watercolor": "watercolor painting style, soft colors, artistic rendering"
            }
            prompt = style_prompts.get(params['style'], style_prompts["anime"])
            negative_prompt = (
                "low quality, blurry, distorted, ugly, bad anatomy, "
                "bad proportions, extra limbs, cloned face, disfigured"
            )

            # Bug fix: the original used `params['seed'] or torch.seed()`,
            # which discarded a legitimate seed of 0; compare against None.
            generator = torch.Generator(device=self.device)
            generator.manual_seed(
                params['seed'] if params['seed'] is not None else torch.seed()
            )

            # StableDiffusionControlNetPipeline (text-to-image) takes the
            # control map as `image` and defines no `strength`/`control_image`
            # parameters (those belong to the ControlNet img2img variant), so
            # only valid arguments are forwarded here.
            with torch.no_grad():
                result = self.pipeline(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    image=canny_image,
                    guidance_scale=params['guidance_scale'],
                    num_inference_steps=params['num_inference_steps'],
                    generator=generator,
                    output_type="pil"
                ).images[0]

            # The pipeline may emit a rounded resolution; restore the
            # preprocessed size so output matches input.
            if result.size != processed_image.size:
                result = result.resize(processed_image.size, Image.Resampling.LANCZOS)

            processing_time = time.time() - start_time

            metadata = {
                "style": params['style'],
                "strength": params['strength'],
                "guidance_scale": params['guidance_scale'],
                "num_inference_steps": params['num_inference_steps'],
                "seed": params['seed'],
                "processing_time": processing_time,
                "original_size": image.size,
                "output_size": result.size,
                "device": self.device
            }

            logger.info(f"Colorization completed in {processing_time:.2f}s")
            return result, metadata

        except torch.cuda.OutOfMemoryError as e:
            logger.error(f"CUDA out of memory: {e}")
            raise RuntimeError("Insufficient GPU memory. Please try with a smaller image or use CPU.") from e
        except Exception as e:
            logger.error(f"Colorization failed: {str(e)}")
            raise RuntimeError(f"Colorization failed: {str(e)}") from e

    def colorize_sync(
        self,
        image_bytes: bytes,
        **kwargs
    ) -> Tuple[bytes, Dict[str, Any]]:
        """Blocking colorization entry point for Celery tasks.

        Args:
            image_bytes: Raw encoded image bytes.
            **kwargs: Forwarded to `colorize` (style, strength, ...).

        Returns:
            Tuple of (PNG-encoded image bytes, metadata dict).

        Raises:
            RuntimeError: if decoding or colorization fails.
        """
        import asyncio  # local import: only this sync/async bridge needs it

        try:
            image = Image.open(io.BytesIO(image_bytes))

            # Bug fix: `colorize` is a coroutine function; the original called
            # it without awaiting, so the tuple unpacking received a coroutine
            # object and failed. Drive the coroutine to completion here.
            colored_image, metadata = asyncio.run(self.colorize(image, **kwargs))

            output_buffer = io.BytesIO()
            # `quality` is a JPEG option the PNG encoder ignores; dropped.
            colored_image.save(output_buffer, format="PNG", optimize=True)
            return output_buffer.getvalue(), metadata

        except Exception as e:
            logger.error(f"Sync colorization failed: {str(e)}")
            raise RuntimeError(f"Colorization failed: {str(e)}") from e


# Global, shared service instance. NOTE: despite the previous "lazy loading"
# label, ColorizationService.__init__ calls _load_model, so the model is
# loaded EAGERLY the first time this module is imported.
colorization_service = ColorizationService()
