import torch
from PIL import Image
import clip
import os
from typing import List, Dict, Union, Optional
import numpy as np
import config


class CLIPFilter:
    """Scores, filters, and ranks candidate images with CLIP.

    Images are compared against text prompts derived from poster content
    and/or a user-supplied style description; the score used throughout is
    CLIP cosine similarity scaled by 100 (roughly 0-100 for related pairs).
    """

    def __init__(self):
        # Prefer GPU when available; CLIP inference is much faster on CUDA.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # config.CLIP_MODEL names the checkpoint to load (e.g. "ViT-B/32").
        self.model, self.preprocess = clip.load(config.CLIP_MODEL, device=self.device)

    @staticmethod
    def _content_to_text(content: Dict) -> str:
        """Flatten the title/objectives/results fields of *content* into one string.

        List values are joined with spaces; missing keys are skipped.
        """
        parts: List[str] = []
        for key in ("title", "objectives", "results"):
            if key not in content:
                continue
            value = content[key]
            parts.append(" ".join(value) if isinstance(value, list) else value)
        return " ".join(parts)

    def _encode_texts(self, prompts: List[str]) -> torch.Tensor:
        """Encode *prompts* into a single unit-norm ensemble text embedding.

        Each prompt embedding is normalized *before* averaging (standard CLIP
        prompt ensembling) so no single prompt dominates via embedding norm.
        truncate=True keeps over-long content from raising in clip.tokenize.
        """
        with torch.no_grad():
            tokens = clip.tokenize(prompts, truncate=True).to(self.device)
            features = self.model.encode_text(tokens)
            features = features / features.norm(dim=-1, keepdim=True)
            ensemble = features.mean(dim=0, keepdim=True)
            return ensemble / ensemble.norm(dim=-1, keepdim=True)

    def _encode_image(self, image_path: str) -> torch.Tensor:
        """Load and encode one image into a unit-norm embedding.

        Propagates whatever PIL/torch raises on unreadable files; callers
        decide how failures are handled.
        """
        # Context manager closes the underlying file handle promptly
        # (the original leaked one handle per image).
        with Image.open(image_path) as img:
            tensor = self.preprocess(img).unsqueeze(0).to(self.device)
        with torch.no_grad():
            features = self.model.encode_image(tensor)
            return features / features.norm(dim=-1, keepdim=True)

    def filter_images(self, image_paths: List[str], content: Dict,
                      custom_style_prompt: Optional[str] = None,
                      top_k: int = 3) -> List[str]:
        """
        Filter images by CLIP relevance to poster content and optional style.

        Args:
            image_paths: Candidate image file paths.
            content: Poster content; "title", "objectives" and "results"
                (str or list of str) are used when present.
            custom_style_prompt: Optional user-defined style description.
            top_k: Maximum number of images to return (default 3, matching
                the previous hard-coded cutoff).

        Returns:
            Up to *top_k* paths with positive similarity, best first.
        """
        if not image_paths:
            return []

        content_text = self._content_to_text(content)

        # Generic quality prompts plus one content-specific prompt.
        prompts = [
            f"Academic visualization for research on: {content_text}",
            "Professional, clear academic chart or diagram",
            "High quality scientific visualization",
            "Clear, informative data graphic for academic poster",
        ]
        if custom_style_prompt:
            prompts.append(custom_style_prompt)
            # Combined prompt gives extra weight to the custom style.
            prompts.append(f"{custom_style_prompt} for research on {content_text}")

        text_features = self._encode_texts(prompts)

        # Best-effort scoring: unreadable images are penalized, not fatal.
        scores: Dict[str, float] = {}
        for image_path in image_paths:
            try:
                image_features = self._encode_image(image_path)
                with torch.no_grad():
                    scores[image_path] = (100.0 * image_features @ text_features.T).item()
            except Exception as e:
                print(f"Error processing image {image_path}: {str(e)}")
                scores[image_path] = -1

        ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        return [path for path, score in ranked if score > 0][:top_k]

    def calculate_style_alignment(self, image_path: str, style_description: str) -> float:
        """
        Calculate alignment score between an image and a style description.

        Args:
            image_path: Path to the image.
            style_description: User-defined style description.

        Returns:
            CLIP similarity score (roughly 0-100); 0.0 on any error.
        """
        try:
            text_features = self._encode_texts([style_description])
            image_features = self._encode_image(image_path)
            with torch.no_grad():
                return (100.0 * image_features @ text_features.T).item()
        except Exception as e:
            print(f"Error calculating style alignment for {image_path}: {str(e)}")
            return 0.0

    def rank_images_by_style(self, image_paths: List[str], style_description: str) -> List[Dict]:
        """
        Rank multiple images by their alignment with a style description.

        The style text is encoded once and reused for every image (the
        previous version redundantly re-encoded it per image).

        Args:
            image_paths: List of paths to images.
            style_description: User-defined style description.

        Returns:
            List of {"path": ..., "score": ...} dicts sorted by score,
            descending; images that fail to score get 0.0.
        """
        try:
            text_features = self._encode_texts([style_description])
        except Exception as e:
            # Keep the per-image error semantics: every image scores 0.0.
            print(f"Error encoding style description: {str(e)}")
            return [{"path": path, "score": 0.0} for path in image_paths]

        results = []
        for path in image_paths:
            try:
                image_features = self._encode_image(path)
                with torch.no_grad():
                    score = (100.0 * image_features @ text_features.T).item()
            except Exception as e:
                print(f"Error calculating style alignment for {path}: {str(e)}")
                score = 0.0
            results.append({"path": path, "score": score})

        return sorted(results, key=lambda x: x["score"], reverse=True)
