from abc import ABC, abstractmethod
from typing import Any, Optional, Literal

# Import custom exceptions
from ..core.exceptions import LLMError

class LLMClient(ABC):
    """
    Abstract interface for Large Language Model clients.

    Concrete subclasses wrap a single provider's API behind this uniform
    completion method, so callers can swap providers without code changes.
    """

    @abstractmethod
    async def get_completion(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        model: Optional[str] = None,  # None -> implementation's default model
        max_tokens: int = 1500,  # conservative cap; callers may raise it
        temperature: float = 0.7,  # moderate randomness by default
        response_format: Optional[Literal["text", "json_object"]] = None,
        **kwargs: Any  # provider-specific knobs (top_p, stop sequences, ...)
    ) -> str:
        """
        Send a prompt (plus an optional system prompt) to the LLM and return
        the generated text completion.

        Args:
            prompt: The main user prompt or instruction.
            system_prompt: Optional system-level instruction that steers the
                model's behavior.
            model: Provider-specific model identifier (e.g. "gpt-4o",
                "claude-3-opus-20240229"). When None, implementations must
                fall back to a sensible default.
            max_tokens: Upper bound on the number of tokens generated.
            temperature: Sampling temperature in [0.0, 2.0]; lower values
                yield more deterministic output, higher values more random.
            response_format: Desired output format, e.g. "json_object" for
                models that support structured output.
            **kwargs: Additional provider-specific parameters.

        Returns:
            The LLM's generated completion as a string.

        Raises:
            LLMError: If the API call fails or returns an error.
            ConfigError: If the client is misconfigured (e.g. missing API key).
        """
        ...

    # --- Potential future methods ---
    # @abstractmethod
    # async def get_chat_completion(
    #     self,
    #     messages: List[Dict[str, str]],  # e.g. [{"role": "user", "content": "Hi"}]
    #     model: Optional[str] = None,
    #     max_tokens: int = 1500,
    #     temperature: float = 0.7,
    #     response_format: Optional[Literal["text", "json_object"]] = None,
    #     **kwargs: Any
    # ) -> str:
    #     """Handles conversational completions using a list of messages."""
    #     ...

    # @abstractmethod
    # async def get_embedding(self, text: str, model: Optional[str] = None) -> List[float]:
    #     """Generates embeddings for a given text."""
    #     ...

    # @abstractmethod
    # async def count_tokens(self, text: str, model: Optional[str] = None) -> int:
    #     """Counts the number of tokens for a given text and model."""
    #     ...