"""
Image OCR parser using PIL and pytesseract
"""
import time
import os
from pathlib import Path
from typing import Union, Dict, Any, Optional
import logging

try:
    from PIL import Image, ImageEnhance, ImageFilter
    import pytesseract
except ImportError:
    Image = None
    pytesseract = None

from .base import FileParser, ParseResult, FileType

logger = logging.getLogger(__name__)


class ImageParser(FileParser):
    """
    Parser for image files using OCR with PIL and pytesseract
    """
    
    def __init__(self, max_file_size: int = 20 * 1024 * 1024):
        """
        Initialize the OCR image parser.

        Args:
            max_file_size: Maximum accepted file size in bytes (default 20 MB)

        Raises:
            ImportError: If PIL or pytesseract is not installed
        """
        super().__init__(max_file_size)

        # The module-level import falls back to None when either library is
        # missing, so the failure surfaces here with an actionable message.
        if Image is None or pytesseract is None:
            raise ImportError("PIL and pytesseract are required for OCR. Install with: pip install Pillow pytesseract")

        # Probe for the native tesseract binary. A missing engine is only a
        # warning at construction time; parse() reports its own error later.
        try:
            pytesseract.get_tesseract_version()
        except Exception:
            logger.warning("Tesseract OCR engine not found. Please install Tesseract OCR.")
    def supports_file_type(self, file_type: FileType) -> bool:
        """Return True only for the IMAGE file type; all others are rejected."""
        if file_type == FileType.IMAGE:
            return True
        return False
    def parse(self, file_path: Union[str, Path], language: str = 'eng', preprocessing: bool = True) -> ParseResult:
        """
        Parse image file using OCR and extract text content

        Args:
            file_path: Path to the image file
            language: OCR language (default: 'eng' for English)
            preprocessing: Whether to apply image preprocessing for better OCR

        Returns:
            ParseResult with extracted text and metadata
        """
        start_time = time.time()
        file_path = Path(file_path)

        # Validate file before touching PIL
        if not self.validate_file(file_path):
            return self.create_error_result("File validation failed", file_path)

        try:
            # Open and load image; copy so we can keep working after the
            # context manager closes the file handle.
            with Image.open(str(file_path)) as image:
                original_image = image.copy()

                # Get image metadata (format, size, EXIF presence, ...)
                metadata = self._get_image_metadata(original_image, file_path)

                # Apply preprocessing if enabled
                if preprocessing:
                    processed_image = self._preprocess_image(original_image)
                else:
                    processed_image = original_image
                metadata['preprocessing_applied'] = preprocessing

                # Perform OCR
                try:
                    # BUG FIX: the previous config appended a
                    # tessedit_char_whitelist containing literal spaces and
                    # backslashes; tesseract splits its config string on
                    # whitespace, so the option was corrupted, and an
                    # ASCII-only whitelist also contradicts non-Latin values
                    # of `language`. Keep only engine (--oem 3 = default,
                    # LSTM-based) and page segmentation (--psm 6 = single
                    # uniform block of text) settings.
                    custom_config = r'--oem 3 --psm 6'

                    # Extract text
                    extracted_text = pytesseract.image_to_string(
                        processed_image,
                        lang=language,
                        config=custom_config
                    )

                    # Get confidence scores (best effort; never fatal)
                    try:
                        ocr_data = pytesseract.image_to_data(processed_image, lang=language, output_type=pytesseract.Output.DICT)
                        # BUG FIX: 'conf' entries may be int, str, or float
                        # strings (e.g. '96.0') depending on the tesseract
                        # version; int('96.0') raises ValueError and silently
                        # dropped all confidence stats. float() accepts every
                        # form; -1 marks non-word boxes and is filtered out.
                        confidences = [c for c in (float(conf) for conf in ocr_data['conf']) if c > 0]

                        if confidences:
                            metadata['ocr_confidence'] = {
                                'average': sum(confidences) / len(confidences),
                                'min': min(confidences),
                                'max': max(confidences),
                                'word_count': len(confidences)
                            }
                    except Exception as e:
                        logger.warning(f"Could not extract OCR confidence data: {e}")

                    # Clean up extracted text
                    cleaned_text = self._clean_ocr_text(extracted_text)

                    if not cleaned_text.strip():
                        return self.create_error_result("No text found in image", file_path)

                    metadata.update({
                        'ocr_language': language,
                        'raw_text_length': len(extracted_text),
                        'cleaned_text_length': len(cleaned_text),
                        'line_count': len(cleaned_text.split('\n')),
                        'word_count': len(cleaned_text.split()),
                    })

                    processing_time = time.time() - start_time

                    return self.create_success_result(
                        content=cleaned_text,
                        file_path=file_path,
                        file_type=FileType.IMAGE,
                        processing_time=processing_time,
                        additional_metadata=metadata
                    )

                except pytesseract.TesseractNotFoundError:
                    error_msg = "Tesseract OCR engine not found. Please install Tesseract OCR."
                    logger.error(error_msg)
                    return self.create_error_result(error_msg, file_path)

                except pytesseract.TesseractError as e:
                    error_msg = f"Tesseract OCR error: {str(e)}"
                    logger.error(error_msg)
                    return self.create_error_result(error_msg, file_path)

        except Exception as e:
            error_msg = f"Error parsing image file: {str(e)}"
            logger.error(error_msg)
            return self.create_error_result(error_msg, file_path)
    def _get_image_metadata(self, image: Any, file_path: Path) -> Dict[str, Any]:
        """
        Collect basic metadata for a PIL image.

        Args:
            image: PIL Image object
            file_path: Path to image file (kept for interface parity)

        Returns:
            Dictionary describing format, mode, dimensions, EXIF presence,
            and transparency.
        """
        width, height = image.size
        info: Dict[str, Any] = {
            'image_format': image.format,
            'image_mode': image.mode,
            'image_size': image.size,  # (width, height)
            'image_width': width,
            'image_height': height,
        }

        # EXIF presence via the legacy private accessor; any failure or
        # absence is simply recorded as "no EXIF".
        has_exif = False
        try:
            if hasattr(image, '_getexif'):
                has_exif = bool(image._getexif())
        except Exception:
            has_exif = False
        info['has_exif'] = has_exif

        # Alpha-carrying modes or an explicit transparency entry both count.
        info['has_transparency'] = image.mode in ('RGBA', 'LA') or 'transparency' in image.info

        return info
    def _preprocess_image(self, image: Any) -> Any:
        """
        Produce a contrast- and sharpness-boosted grayscale copy for OCR.

        Args:
            image: Original PIL Image

        Returns:
            Preprocessed PIL Image, or the untouched original if any
            preprocessing step fails.
        """
        try:
            # Normalise the colour space first so the grayscale conversion
            # behaves consistently for palette/CMYK/etc. inputs.
            working = image if image.mode == 'RGB' else image.convert('RGB')

            # Single-channel input generally OCRs better.
            working = working.convert('L')

            # Boost contrast, then sharpness, each by a factor of 2.
            working = ImageEnhance.Contrast(working).enhance(2.0)
            working = ImageEnhance.Sharpness(working).enhance(2.0)

            return working

        except Exception as e:
            # Preprocessing is best-effort: fall back to the original image.
            logger.warning(f"Error in image preprocessing: {e}")
            return image
    def _clean_ocr_text(self, raw_text: str) -> str:
        """
        Clean up OCR extracted text.

        Collapses runs of whitespace within each line to single spaces and
        drops lines that are empty after normalisation.

        Args:
            raw_text: Raw text from OCR

        Returns:
            Cleaned text (empty string for empty input)
        """
        if not raw_text:
            return ""

        # ' '.join(line.split()) both collapses internal whitespace runs and
        # strips the line edges in a single pass.
        normalized = (' '.join(line.split()) for line in raw_text.split('\n'))

        # BUG FIX (dead code removed): the old implementation followed this
        # with re.sub(r'\n\s*\n', '\n\n', ...) and a final .strip(), but both
        # could never fire — blank lines are filtered out here, so consecutive
        # newlines and leading/trailing whitespace cannot occur. Output is
        # byte-identical to the previous version.
        return '\n'.join(line for line in normalized if line)
    def extract_with_bounding_boxes(self, file_path: Union[str, Path], language: str = 'eng') -> Dict[str, Any]:
        """
        Extract text with bounding box information

        Args:
            file_path: Path to the image file
            language: OCR language

        Returns:
            Dictionary with per-word text, confidence, and geometry, or an
            {"error": ...} dictionary on failure.
        """
        file_path = Path(file_path)

        if not self.validate_file(file_path):
            return {"error": "File validation failed"}

        try:
            with Image.open(str(file_path)) as image:
                # Get OCR data with bounding boxes
                ocr_data = pytesseract.image_to_data(
                    image,
                    lang=language,
                    output_type=pytesseract.Output.DICT
                )

                # Process the data
                words_data = []
                for i in range(len(ocr_data['text'])):
                    text = ocr_data['text'][i].strip()
                    # BUG FIX: newer tesseract builds report confidence as a
                    # float string (e.g. '96.0'); int(...) raised ValueError
                    # and aborted the entire extraction via the outer except.
                    # float() accepts int, str, and float forms alike.
                    confidence = float(ocr_data['conf'][i])

                    if text and confidence > 0:  # Only include confident detections
                        words_data.append({
                            'text': text,
                            'confidence': confidence,
                            'left': ocr_data['left'][i],
                            'top': ocr_data['top'][i],
                            'width': ocr_data['width'][i],
                            'height': ocr_data['height'][i],
                            'level': ocr_data['level'][i],
                            'page_num': ocr_data['page_num'][i],
                            'block_num': ocr_data['block_num'][i],
                            'par_num': ocr_data['par_num'][i],
                            'line_num': ocr_data['line_num'][i],
                            'word_num': ocr_data['word_num'][i]
                        })

                return {
                    'words': words_data,
                    'total_words': len(words_data),
                    'image_size': image.size
                }

        except Exception as e:
            logger.error(f"Error extracting bounding boxes: {e}")
            return {"error": str(e)}
    def get_supported_languages(self) -> list:
        """
        Get list of supported OCR languages

        Returns:
            Language codes reported by the installed tesseract binary, or
            ['eng'] if the query fails for any reason.
        """
        try:
            return pytesseract.get_languages()
        except Exception as exc:
            logger.error(f"Error getting supported languages: {exc}")
            return ['eng']  # Default to English
    def check_image_quality(self, file_path: Union[str, Path]) -> Dict[str, Any]:
        """
        Analyze image quality for OCR readiness

        Args:
            file_path: Path to the image file

        Returns:
            Dictionary with quality metrics; includes a 'warning' key when
            any quality concern is detected, or {"error": ...} on failure.
        """
        file_path = Path(file_path)

        try:
            with Image.open(str(file_path)) as image:
                # Basic quality metrics
                width, height = image.size
                pixel_count = width * height

                quality_info: Dict[str, Any] = {
                    'resolution': f"{width}x{height}",
                    'pixel_count': pixel_count,
                    'format': image.format,
                    'mode': image.mode,
                    'estimated_quality': 'unknown'
                }

                # Coarse quality buckets purely by pixel count.
                if pixel_count < 100000:  # Less than 100k pixels
                    quality_info['estimated_quality'] = 'low'
                elif pixel_count < 1000000:  # Less than 1M pixels
                    quality_info['estimated_quality'] = 'medium'
                else:
                    quality_info['estimated_quality'] = 'high'

                # BUG FIX: both checks previously wrote to the same 'warning'
                # key, so an unusual color mode silently overwrote the
                # low-resolution warning. Accumulate and join instead; the
                # key and its string type are unchanged for callers.
                warnings = []
                if width < 300 or height < 300:
                    warnings.append('Low resolution may affect OCR accuracy')
                if image.mode not in ['RGB', 'L', 'RGBA']:
                    warnings.append(f'Unusual color mode ({image.mode}) may affect OCR')
                if warnings:
                    quality_info['warning'] = '; '.join(warnings)

                return quality_info

        except Exception as e:
            logger.error(f"Error checking image quality: {e}")
            return {"error": str(e)}