import os
import google.generativeai as genai
from dotenv import load_dotenv
from loguru import logger
from PIL import Image
import io
import httpx
import pathlib
from google.generativeai import types as genai_types
import hashlib # Import hashlib for caching

load_dotenv()

class LLMInterface:
    def __init__(self):
        """Configure the Gemini client from GEMINI_API_KEY and set up pricing/cache state.

        Raises:
            ValueError: if GEMINI_API_KEY is missing from the environment.
        """
        self.api_key = os.getenv("GEMINI_API_KEY")
        if not self.api_key:
            raise ValueError("GEMINI_API_KEY not found in environment variables.")
        genai.configure(api_key=self.api_key)

        # Explicitly use gemini-2.5-flash-preview-04-17 as it's a recommended model
        self.default_model_name = 'gemini-2.5-flash-preview-04-17'
        self.model = genai.GenerativeModel(self.default_model_name)
        # Track the active model from the start so select_model() does not
        # needlessly rebuild the client on its first call.
        self.current_model_name = self.default_model_name
        # Consistency fix: use the loguru logger (as the rest of the class
        # does) instead of a bare print().
        logger.info(f"Using default model: {self.default_model_name}")
        self.log_dir = None    # set via set_log_dir(); file logging is off until then
        self.log_counter = {}  # per-prefix counter used to number log files
        self.cache = {}        # prompt-hash -> result dict (see generate_content)

        # Published price per 1M tokens, in USD.
        self.pricing = {
            "gemini-2.5-pro": {
                "input_under_200k": 1.25,
                "input_over_200k": 2.5,
                "output_under_200k": 10.0,
                "output_over_200k": 15.0,
            },
            "gemini-2.5-flash-preview-04-17": {
                "input": 0.15,
                "output": 0.6,
            },
        }

    def _calculate_cost(self, model_name: str, input_tokens: int, output_tokens: int) -> float:
        """Return the cost in USD of one API call, from ``self.pricing``.

        For gemini-2.5-pro, Google's price sheet tiers BOTH the input and the
        output rate by the prompt size ("prompts <= 200k tokens"); the
        original code keyed the output tier on output_tokens, which
        mischarged long-prompt calls. Unknown models cost 0.0.
        """
        rates = self.pricing.get(model_name)
        if rates is None:
            return 0.0  # model not in the price table
        if model_name == "gemini-2.5-pro":
            # Tier chosen by prompt length for both rates, per the pricing page.
            tier = "under_200k" if input_tokens <= 200_000 else "over_200k"
            return (
                (input_tokens / 1_000_000) * rates[f"input_{tier}"]
                + (output_tokens / 1_000_000) * rates[f"output_{tier}"]
            )
        # Flat-rate models (e.g. gemini-2.5-flash-preview-04-17).
        return (
            (input_tokens / 1_000_000) * rates["input"]
            + (output_tokens / 1_000_000) * rates["output"]
        )

    def set_log_dir(self, log_dir: str):
        self.log_dir = log_dir
        os.makedirs(self.log_dir, exist_ok=True)
        self.log_counter = {} # Reset counter for new task

    def select_model(self, task_type: str = "general") -> str:
        """Selects an LLM based on task type, simulating different models for different needs."""
        # All specialised task types currently resolve to the same placeholder
        # model; a real deployment would map each to a purpose-tuned model.
        placeholder = 'gemini-2.5-flash-preview-04-17'
        model_by_task = {
            "coding": placeholder,
            "creative": placeholder,
            "summarization": placeholder,
        }
        selected_model = model_by_task.get(task_type, self.default_model_name)

        # Rebuild the client only when the model actually changes.
        if getattr(self, 'current_model_name', None) != selected_model:
            self.model = genai.GenerativeModel(selected_model)
            self.current_model_name = selected_model
            print(f"Selected model for {task_type} task: {selected_model}")
        return selected_model

    def generate_content(self, prompt: str, task_type: str = "general", task_name: str = None) -> dict:
        """Generate text for *prompt* with per-prompt caching and optional file logging.

        Returns a dict with ``output_content``, ``input_tokens``,
        ``output_tokens`` and ``cost``. Errors are reported in-band via the
        same dict shape (error text in ``output_content``) rather than raised.
        """
        self.select_model(task_type)

        # Cache key: MD5 of the prompt text (non-cryptographic use).
        # NOTE(review): the key ignores the selected model, so the same prompt
        # sent to two different models shares one cache slot — confirm intent.
        prompt_hash = hashlib.md5(prompt.encode('utf-8')).hexdigest()

        if prompt_hash in self.cache:
            logger.info(f"Returning cached response for prompt hash: {prompt_hash}")
            # Bug fix: hand back a copy so callers cannot mutate the cached
            # entry; report zero tokens/cost since no API call was made.
            cached_response = dict(self.cache[prompt_hash])
            cached_response["input_tokens"] = 0
            cached_response["output_tokens"] = 0
            cached_response["cost"] = 0.0
            return cached_response

        if self.log_dir:
            # task_name (when given) takes precedence over task_type so that
            # the input/output/error logs of one call share a prefix.
            log_prefix = task_name if task_name else task_type
            self.log_counter[log_prefix] = self.log_counter.get(log_prefix, 0) + 1
            log_id = self.log_counter[log_prefix]
            input_log_path = os.path.join(self.log_dir, f"{log_prefix}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(prompt)

        try:
            response = self.model.generate_content(prompt)
            output_content = response.text

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            if self.log_dir:
                # Bug fix: use log_prefix (not task_type) so the output log
                # pairs with the input log written above when task_name is set.
                output_log_path = os.path.join(self.log_dir, f"{log_prefix}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(output_content)

            return_data = {
                "output_content": output_content,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                # NOTE(review): * 7 looks like a fixed USD->CNY conversion
                # applied throughout this class — confirm and consider naming it.
                "cost": cost * 7
            }
            self.cache[prompt_hash] = return_data.copy()  # store a copy to avoid mutation by callers
            return return_data
        except Exception as e:
            error_message = f"Error generating content: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{log_prefix}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    def generate_content_with_image(self, image_path: str, prompt: str, task_type: str = "image_understanding"):
        """Run a multimodal (image + text) generation.

        Returns the standard result dict (``output_content``, ``input_tokens``,
        ``output_tokens``, ``cost``); failures are reported in-band with the
        error text in ``output_content``.
        """
        self.select_model(task_type)

        log_id = None
        if self.log_dir:
            # Number this call within the task type so log files sort in order.
            log_id = self.log_counter.get(task_type, 0) + 1
            self.log_counter[task_type] = log_id
            input_log = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log, "w", encoding="utf-8") as log_file:
                log_file.write(f"Image Path: {image_path}\nPrompt: {prompt}")

        try:
            picture = Image.open(image_path)
            response = self.model.generate_content([prompt, picture])
            text = response.text

            tokens_in, tokens_out, usd_cost = 0, 0, 0.0
            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                tokens_in = usage.prompt_token_count
                tokens_out = usage.candidates_token_count
                usd_cost = self._calculate_cost(self.current_model_name, tokens_in, tokens_out)

            if self.log_dir:
                output_log = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log, "w", encoding="utf-8") as log_file:
                    log_file.write(text)

            return {
                "output_content": text,
                "input_tokens": tokens_in,
                "output_tokens": tokens_out,
                "cost": usd_cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with image: {e}"
            if self.log_dir:
                error_log = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log, "w", encoding="utf-8") as log_file:
                    log_file.write(error_message)
                logger.error(f"LLM image error logged to {error_log}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    def generate_content_with_thinking(self, prompt: str, thinking_budget: int = -1, include_thoughts: bool = False, task_type: str = "thinking_content_generation") -> dict:
        """Generate content with Gemini "thinking" mode configured.

        Splits the response parts into the user-visible answer and an optional
        thought summary, and prices thought tokens at the output-token rate.

        Args:
            prompt: Text prompt to send.
            thinking_budget: Token budget for thinking; -1 presumably means
                "dynamic / model-decided" — TODO confirm against the SDK docs.
            include_thoughts: Whether to request a thought summary.
            task_type: Used for model selection and log-file naming.

        Returns:
            Dict with output_content, thoughts_summary, input_tokens,
            output_tokens, thoughts_tokens and cost; on failure the error
            text is returned in output_content with zeroed counters.

        NOTE(review): ``ThinkingConfig`` nested inside ``GenerationConfig``
        belongs to the newer google-genai SDK surface; the legacy
        google.generativeai package imported at the top of this file may not
        expose it, in which case this method always takes the except branch —
        verify against the installed SDK version.
        """
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"Prompt: {prompt}\nThinking Budget: {thinking_budget}\nInclude Thoughts: {include_thoughts}")

        try:
            generation_config = genai.types.GenerationConfig(
                thinking_config=genai.types.ThinkingConfig(
                    thinking_budget=thinking_budget,
                    include_thoughts=include_thoughts
                )
            )
            response = self.model.generate_content(prompt, generation_config=generation_config)

            # Partition response parts: thought parts feed the summary, the
            # rest form the visible output.
            output_content = ""
            thoughts_summary = ""
            for part in response.candidates[0].content.parts:
                if part.text:
                    if part.thought:
                        thoughts_summary += part.text
                    else:
                        output_content += part.text

            input_tokens = 0
            output_tokens = 0
            thoughts_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                # thoughts_token_count is absent on SDK versions without thinking support.
                thoughts_tokens = usage.thoughts_token_count if hasattr(usage, 'thoughts_token_count') else 0
                # Thought tokens are billed together with output tokens.
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens + thoughts_tokens)

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(f"Output Content: {output_content}\nThoughts Summary: {thoughts_summary}")

            return {
                "output_content": output_content,
                "thoughts_summary": thoughts_summary,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "thoughts_tokens": thoughts_tokens,
                # NOTE(review): * 7 mirrors the fixed currency multiplier used
                # throughout this class — confirm intent (USD->CNY?).
                "cost": cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with thinking: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM thinking error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "thoughts_summary": "",
                "input_tokens": 0,
                "output_tokens": 0,
                "thoughts_tokens": 0,
                "cost": 0.0
            }

    def generate_content_with_pdf(self, pdf_path: str, prompt: str, task_type: str = "pdf_understanding"):
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"PDF Path: {pdf_path}\nPrompt: {prompt}")
            

        try:
            filepath = pathlib.Path(pdf_path)
            contents = [
                genai.types.Part.from_bytes(
                    data=filepath.read_bytes(),
                    mime_type='application/pdf',
                ),
                prompt
            ]
            response = self.model.generate_content(contents)
            output_content = response.text

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(output_content)
                

            return {
                "output_content": output_content,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "cost": cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with PDF: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM PDF error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    def generate_content_with_pdf_url(self, pdf_url: str, prompt: str, task_type: str = "pdf_url_understanding"):
        """Download a PDF from *pdf_url* and ask the model about it.

        Returns the standard result dict; download and generation failures
        are reported in-band via ``output_content``.
        """
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"PDF URL: {pdf_url}\nPrompt: {prompt}")

        try:
            # Robustness fix: fail fast on HTTP errors instead of feeding an
            # error page's bytes to the model as if they were a PDF.
            http_response = httpx.get(pdf_url)
            http_response.raise_for_status()
            # Bug fix: legacy google.generativeai has no genai.types.Part.from_bytes;
            # inline blobs are {'mime_type','data'} dicts.
            contents = [
                {"mime_type": "application/pdf", "data": http_response.content},
                prompt,
            ]
            response = self.model.generate_content(contents)
            output_content = response.text

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(output_content)

            return {
                "output_content": output_content,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                # Fixed currency multiplier used throughout this class.
                "cost": cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with PDF URL: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM PDF URL error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    def generate_content_with_multiple_pdfs(self, pdf_paths: list, prompt: str, task_type: str = "multiple_pdf_understanding"):
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"PDF Paths: {pdf_paths}\nPrompt: {prompt}")
            

        try:
            contents = []
            for pdf_path in pdf_paths:
                filepath = pathlib.Path(pdf_path)
                contents.append(genai.types.Part.from_bytes(
                    data=filepath.read_bytes(),
                    mime_type='application/pdf',
                ))
            contents.append(prompt)

            response = self.model.generate_content(contents)
            output_content = response.text

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(output_content)
                

            return {
                "output_content": output_content,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "cost": cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with multiple PDFs: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM multiple PDF error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    def upload_file_to_gemini(self, file_path: str, mime_type: str):
        """Upload a local file through the Gemini File API.

        Returns the uploaded file handle; on failure the error is logged and
        the exception is re-raised so callers can react to a failed upload.
        """
        if self.log_dir:
            log_id = self.log_counter.get("file_upload", 0) + 1
            self.log_counter["file_upload"] = log_id
            input_log_path = os.path.join(self.log_dir, f"file_upload_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"File Path: {file_path}\nMIME Type: {mime_type}")

        try:
            # Bug fix: GenerativeModel has no `.client` attribute in the legacy
            # google.generativeai SDK; the File API is exposed as the
            # module-level genai.upload_file() helper.
            uploaded_file = genai.upload_file(path=file_path, mime_type=mime_type)
            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"file_upload_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(f"Uploaded File Name: {uploaded_file.name}")
            return uploaded_file
        except Exception as e:
            error_message = f"Error uploading file to Gemini: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"file_upload_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM file upload error logged to {error_log_path}")
            raise  # propagate so callers can distinguish failure from a handle

    def generate_content_with_audio(self, audio_path: str, prompt: str, task_type: str = "audio_understanding"):
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"Audio Path: {audio_path}\nPrompt: {prompt}")
            

        try:
            filepath = pathlib.Path(audio_path)
            contents = [
                prompt,
                genai.types.Part.from_bytes(
                    data=filepath.read_bytes(),
                    mime_type='audio/mp3',
                )
            ]
            response = self.model.generate_content(contents)
            output_content = response.text

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(output_content)
                

            return {
                "output_content": output_content,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "cost": cost * 7
            }
        except Exception as e:
            error_message = f"Error generating content with audio: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM audio error logged to {error_log_path}")
            return {
                "output_content": error_message,
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }
    def count_audio_tokens(self, audio_path: str, task_type: str = "audio_token_count"):
        self.select_model(task_type)

        if self.log_dir:
            self.log_counter[task_type] = self.log_counter.get(task_type, 0) + 1
            log_id = self.log_counter[task_type]
            input_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_input.log")
            with open(input_log_path, "w", encoding="utf-8") as f:
                f.write(f"Audio Path: {audio_path}")
            

        try:
            filepath = pathlib.Path(audio_path)
            contents = [
                genai.types.Part.from_bytes(
                    data=filepath.read_bytes(),
                    mime_type='audio/mp3',
                )
            ]
            response = self.model.count_tokens(contents=contents)
            token_count = response.total_tokens

            if self.log_dir:
                output_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_output.log")
                with open(output_log_path, "w", encoding="utf-8") as f:
                    f.write(f"Total Tokens: {token_count}")
                

            return {"total_tokens": token_count}
        except Exception as e:
            error_message = f"Error counting audio tokens: {e}"
            if self.log_dir:
                error_log_path = os.path.join(self.log_dir, f"{task_type}_{log_id:03d}_error.log")
                with open(error_log_path, "w", encoding="utf-8") as f:
                    f.write(error_message)
                logger.error(f"LLM audio token count error logged to {error_log_path}")
            return {"total_tokens": 0}

    def __call__(self, messages: list, temperature: float = 0.1, max_tokens: int = 512, stop_strs: list = None, num_comps: int = 1, task_type: str = "general", top_p: float = 1.0, top_k: int = 0) -> dict:
        """Chat-style entry point: flatten *messages* and generate one completion.

        Each message must expose ``.role`` and ``.content``. ``num_comps`` is
        accepted for interface compatibility but ignored (a single completion
        is produced). Returns the standard result dict; errors are reported
        in-band via ``output_content``.
        """
        self.select_model(task_type)
        # Flatten the conversation into one "role: content" transcript, since
        # generate_content is driven with a single prompt string here.
        formatted_prompt = "".join(f"{msg.role}: {msg.content}\n" for msg in messages)

        generation_config = {
            "temperature": temperature,
            "max_output_tokens": max_tokens,
            "top_p": top_p,
            # NOTE(review): top_k=0 may be rejected by the Gemini API —
            # confirm a valid default against the SDK.
            "top_k": top_k,
        }
        # Robustness fix: only send stop sequences when the caller supplied
        # them, instead of passing stop_sequences=None through to the API.
        if stop_strs is not None:
            generation_config["stop_sequences"] = stop_strs

        try:
            response = self.model.generate_content(formatted_prompt, generation_config=generation_config)

            input_tokens = 0
            output_tokens = 0
            cost = 0.0

            if hasattr(response, 'usage_metadata'):
                usage = response.usage_metadata
                input_tokens = usage.prompt_token_count
                output_tokens = usage.candidates_token_count
                cost = self._calculate_cost(self.current_model_name, input_tokens, output_tokens)

            return {
                "output_content": response.text,
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                # Fixed currency multiplier used throughout this class.
                "cost": cost * 7
            }
        except Exception as e:
            return {
                "output_content": f"Error generating content in __call__: {e}",
                "input_tokens": 0,
                "output_tokens": 0,
                "cost": 0.0
            }

    # def embed_query(self, text: str) -> list[float]:
    #     # Placeholder for embedding function
    #     # In a real scenario, this would use an actual embedding model
    #     return [0.0] * 768  # Return a dummy embedding (e.g., 768-dimensional zero vector)

if __name__ == "__main__":
    # Manual smoke test; requires GEMINI_API_KEY in the environment / .env.
    interface = LLMInterface()
    question = "Hello, what is your purpose?"
    print(f"Prompt: {question}")
    answer = interface.generate_content(question)
    print(f"Response: {answer}")