import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional, Union

import numpy as np
import torch
from PIL import Image

from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import ModelFile

# NOTE(review): calling basicConfig at import time configures the ROOT logger of
# any application that imports this module; libraries conventionally leave
# logging configuration to the host app — confirm this side effect is intended.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("DCTNet")

class ModelLoadError(Exception):
    """Raised when a style-transfer model cannot be loaded."""

class DCTNetCore:
    """
    Core functionality of DCT-Net for style transfer.

    Wraps ModelScope 'cartoon-translation' pipelines: one pipeline per style,
    loaded lazily and cached in memory for reuse.
    """
    # Style name -> ModelScope model id.
    MODEL_MAPPING = {
        'anime': 'damo/cv_unet_person-image-cartoon_compound-models',
        '3d': 'damo/cv_unet_person-image-cartoon-3d_compound-models',
        'handdrawn': 'damo/cv_unet_person-image-cartoon-handdrawn_compound-models',
        'sketch': 'damo/cv_unet_person-image-cartoon-sketch_compound-models',
        'artstyle': 'damo/cv_unet_person-image-cartoon-artstyle_compound-models',
        'sd-design': 'damo/cv_unet_person-image-cartoon-sd-design_compound-models',
        'sd-illustration': 'damo/cv_unet_person-image-cartoon-sd-illustration_compound-models'
    }

    def __init__(self, cache_dir: Optional[str] = None):
        """
        Initialize DCTNet core.

        Args:
            cache_dir: Directory to cache downloaded models. If None, uses
                the default modelscope cache.
        """
        # Loaded pipelines keyed by style. Values are whatever the modelscope
        # `pipeline(...)` factory returns, hence Any (the factory itself is a
        # function, not a type, so it is wrong as an annotation).
        self.models: Dict[str, Any] = {}
        self.current_style: Optional[str] = None
        self.cache_dir = cache_dir
        if cache_dir:
            # modelscope reads this env var to decide where models are stored.
            os.environ['MODELSCOPE_CACHE'] = cache_dir
            Path(cache_dir).mkdir(parents=True, exist_ok=True)

    def validate_style(self, style: str) -> None:
        """Raise ValueError if `style` is not a key of MODEL_MAPPING."""
        if style not in self.MODEL_MAPPING:
            raise ValueError(f"Invalid style '{style}'. Available styles: {list(self.MODEL_MAPPING.keys())}")

    def get_model_path(self, style: str) -> str:
        """
        Return the expected local configuration-file path for a cached model,
        or "" when no explicit cache_dir was configured.

        NOTE(review): assumes models land under <cache_dir>/<style>/ — confirm
        this matches the actual modelscope cache directory layout.
        """
        self.validate_style(style)
        if self.cache_dir:
            return os.path.join(self.cache_dir, style, ModelFile.CONFIGURATION)
        return ""

    def is_model_cached(self, style: str) -> bool:
        """Check whether the model's configuration file already exists on disk."""
        model_path = self.get_model_path(style)
        # bool(...) so an empty path yields False rather than leaking the
        # falsy empty string (the original violated the -> bool annotation).
        return bool(model_path) and os.path.exists(model_path)

    def load_model(self, style: str) -> Any:
        """
        Load the pipeline for a specific style, reusing an in-memory instance
        when one was already loaded.

        Args:
            style: Style name from MODEL_MAPPING.

        Returns:
            Loaded pipeline object.

        Raises:
            ValueError: If `style` is not a known style.
            ModelLoadError: If model loading fails.
        """
        self.validate_style(style)

        cached = self.models.get(style)
        if cached is not None:
            logger.info("Using cached model for style: %s", style)
            # Keep current_style in sync even on a cache hit; the original
            # left it pointing at the previously loaded style.
            self.current_style = style
            return cached

        model_id = self.MODEL_MAPPING[style]
        logger.info("Loading model for style: %s", style)

        try:
            # Prefer GPU when available; modelscope accepts 'cuda'/'cpu'.
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            logger.info("Using device: %s", device)
            pipe = pipeline('cartoon-translation', model=model_id, device=device)
        except Exception as e:
            error_msg = f"Failed to load model for style '{style}': {str(e)}"
            logger.error(error_msg)
            raise ModelLoadError(error_msg) from e

        # Bookkeeping kept outside the try: only the download/construction
        # above can legitimately fail with a load error.
        self.models[style] = pipe
        self.current_style = style
        logger.info("Successfully loaded model for style: %s", style)
        return pipe

    def process_image(self, image: Union[np.ndarray, "Image.Image"], model: Any) -> np.ndarray:
        """
        Process an image with a loaded model.

        Args:
            image: Input image as numpy array (uint8, or any float dtype —
                values in [0, 1] are rescaled, others clipped to [0, 255])
                or PIL Image.
            model: Loaded pipeline object.

        Returns:
            Processed image as numpy array (float32, range 0-1).

        Raises:
            ValueError: If image format is invalid.
            RuntimeError: If processing fails.
        """
        # Validate/convert OUTSIDE the try block so invalid input surfaces as
        # the documented ValueError instead of being re-wrapped as RuntimeError
        # (the original's broad except swallowed its own ValueError).
        if isinstance(image, np.ndarray):
            if np.issubdtype(image.dtype, np.floating):
                # Any float dtype is accepted, not just float32.
                if image.size and image.max() <= 1.0:
                    image = (image * 255).astype(np.uint8)
                else:
                    # Out-of-range floats would crash Image.fromarray; clip.
                    image = np.clip(image, 0, 255).astype(np.uint8)
            image = Image.fromarray(image)
        elif not isinstance(image, Image.Image):
            raise ValueError("Image must be numpy array or PIL Image")

        try:
            logger.info("Processing image...")
            result = model(image)
            output_image = result[OutputKeys.OUTPUT_IMG]

            # Pipelines may return either a PIL Image or an ndarray.
            if isinstance(output_image, Image.Image):
                output_image = np.array(output_image)

            # Normalize to float32 in [0, 1] as documented.
            output_image = output_image.astype(np.float32) / 255.0
            logger.info("Image processing completed successfully")
            return output_image

        except Exception as e:
            error_msg = f"Error processing image: {str(e)}"
            logger.error(error_msg)
            raise RuntimeError(error_msg) from e

    def cleanup(self):
        """Release all loaded model resources and reset state."""
        for model in self.models.values():
            # Not every pipeline implementation exposes release(); best-effort.
            if hasattr(model, 'release'):
                model.release()
        self.models.clear()
        self.current_style = None
        logger.info("Released all model resources")