"""
Image Cache Service for AIPPT
Handles downloading and caching of template cover images
"""
import hashlib
import os
import re
from pathlib import Path
from typing import Optional

import requests

from src.llm.aippt_app.config.aippt_config import AIPPTConfig


class ImageCacheService:
    """Service for downloading and caching template cover images on disk.

    Images are stored under ``<project_root>/cache/template_covers``.  Each
    file is named ``<cache_id><ext>`` where the cache ID is the numeric
    template ID extracted from the URL path, or an MD5 hash of the full URL
    when no numeric ID can be found.
    """

    # Matches a purely numeric filename (the template ID) right before the
    # extension, e.g. .../ppt_template/1940697631068151808.png
    _ID_PATTERN = re.compile(r'/(\d+)\.[a-zA-Z]+$')

    def __init__(self, config: Optional[AIPPTConfig] = None):
        """
        Initialize image cache service.

        Args:
            config: AIPPTConfig instance; a default-constructed one is used
                when not provided.
        """
        self.config = config or AIPPTConfig()
        # Five .parent hops climb from this file to the project root, then
        # down into cache/template_covers.  Created eagerly so every write
        # below can assume the directory exists.
        self.cache_dir = Path(__file__).parent.parent.parent.parent.parent / "cache" / "template_covers"
        self.cache_dir.mkdir(parents=True, exist_ok=True)

    def get_cached_image(self, image_url: str) -> str:
        """
        Return the local path of the cached image, downloading it on a miss.

        Args:
            image_url: Original image URL from AIPPT

        Returns:
            Local file path to the cached image

        Raises:
            Exception: If the image is not cached and the download fails
        """
        cache_id = self._generate_cache_id(image_url)
        ext = self._get_file_extension(image_url)
        cache_file = self.cache_dir / f"{cache_id}{ext}"

        # Cache hit: serve the existing file without touching the network.
        if cache_file.exists():
            return str(cache_file)

        return self._download_and_cache(image_url, cache_file)

    def _generate_cache_id(self, url: str) -> str:
        """
        Generate a unique cache ID for *url*.

        Prefers the numeric template ID embedded in the URL path so cache
        file names stay human-readable; falls back to an MD5 hash of the
        full URL.

        Args:
            url: Image URL

        Returns:
            Cache ID string
        """
        # Query parameters (signatures, timestamps, ...) must not change
        # the cache identity, so strip them before matching.
        url_path = url.split('?')[0]

        match = self._ID_PATTERN.search(url_path)
        if match:
            return match.group(1)

        # MD5 is acceptable here: this is a cache key, not a security boundary.
        return hashlib.md5(url.encode()).hexdigest()

    def _get_file_extension(self, url: str) -> str:
        """
        Extract the file extension from *url*.

        Args:
            url: Image URL

        Returns:
            File extension including the dot (e.g. '.png', '.jpg');
            defaults to '.png' when the URL path has no extension.
        """
        url_path = url.split('?')[0]  # strip query parameters first
        ext = os.path.splitext(url_path)[1]
        return ext if ext else '.png'

    def _download_and_cache(self, image_url: str, cache_file: Path) -> str:
        """
        Download an image and store it at *cache_file*.

        The payload is first written to a temporary '.part' file and then
        atomically renamed into place, so a failed or interrupted download
        can never leave a truncated file that get_cached_image would later
        serve as a valid cache hit.

        Args:
            image_url: Image URL to download
            cache_file: Local path to save the image

        Returns:
            Local file path

        Raises:
            Exception: If the download or the write fails
        """
        tmp_file = Path(f"{cache_file}.part")
        try:
            # Auth headers come from the config -- presumably required by
            # the AIPPT image host (TODO confirm the CDN needs them).
            headers = self.config.get_headers()

            response = requests.get(image_url, headers=headers, timeout=30)
            response.raise_for_status()

            with open(tmp_file, 'wb') as f:
                f.write(response.content)
            # Atomic publish: the final name only ever holds complete data.
            os.replace(tmp_file, cache_file)

            return str(cache_file)

        except Exception as e:
            # Best effort: never leave a stale partial file behind.
            try:
                if tmp_file.exists():
                    tmp_file.unlink()
            except OSError:
                pass
            # Chain the original error so callers can see the root cause.
            raise Exception(f"Failed to download image from {image_url}: {str(e)}") from e

    def clear_cache(self, image_id: Optional[str] = None):
        """
        Clear image cache.

        Args:
            image_id: If provided, clear only files for this cache ID
                (any extension). Otherwise clear all cached images.
        """
        pattern = f"{image_id}.*" if image_id else "*"
        for file in self.cache_dir.glob(pattern):
            # Only unlink regular files; a stray subdirectory would make
            # Path.unlink() raise on most platforms.
            if file.is_file():
                file.unlink()

    def get_cache_size(self) -> int:
        """
        Get total cache size in bytes.

        Returns:
            Total size of all regular files in the cache directory.
        """
        # Count regular files only; ignore any stray subdirectories.
        return sum(f.stat().st_size for f in self.cache_dir.glob("*") if f.is_file())
