# Purpose: Implements the LLMClient interface for OpenAI API.
# Changes:
# - Corrected access to status_code in APIError handling using getattr for safety.

import os
import logging
from typing import Any, Optional, Literal

# Use standard library json, consider orjson if performance needed later
import json

# Import OpenAI library
try:
    # Using v1+ syntax
    from openai import AsyncOpenAI, OpenAIError, APIError, RateLimitError, APIConnectionError, AuthenticationError
except ImportError:
    raise ImportError("OpenAI library not installed. Please run 'poetry add openai'")

# Use relative imports
from .base import LLMClient
from ..core.exceptions import LLMError, ConfigError
from ..core.config import get_config_value
# Use core logger setup
from ..core.logging_setup import get_logger
logger = get_logger(__name__)

class OpenAIClient(LLMClient):
    """Client for interacting with OpenAI APIs (v1.x+)."""

    def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None):
        """Initializes the AsyncOpenAI client.

        Args:
            api_key: OpenAI API key; falls back to the OPENAI_API_KEY env var.
            base_url: API endpoint override; falls back to config
                (llm_clients.openai.base_url), then the OPENAI_BASE_URL env
                var, then the OpenAI default.

        Raises:
            ConfigError: If no API key can be resolved from any source.
            LLMError: If the underlying AsyncOpenAI client fails to construct.
        """
        self.api_key = api_key or os.getenv("OPENAI_API_KEY")
        self.base_url = base_url or get_config_value("llm_clients.openai.base_url") or os.getenv("OPENAI_BASE_URL")

        if not self.api_key:
            raise ConfigError("OpenAI API key not found. Set OPENAI_API_KEY environment variable or provide it during client initialization.")

        logger.info("Initializing OpenAI client. Base URL: %s", self.base_url or "Default OpenAI")
        try:
            self.client = AsyncOpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
                timeout=60.0,   # Timeout for individual requests
                max_retries=2,  # Number of retries on connection errors etc.
            )
        except Exception as e:  # boundary: surface any construction failure as a domain error
            logger.error("Failed to initialize OpenAI client: %s", e, exc_info=True)
            raise LLMError(f"OpenAI client initialization failed: {e}", provider="OpenAI") from e

    def _build_request_params(
        self,
        prompt: str,
        system_prompt: Optional[str],
        target_model: str,
        max_tokens: int,
        temperature: float,
        response_format: Optional[Literal["text", "json_object"]],
        extra: "dict",
    ) -> "dict":
        """Assembles the keyword arguments for the Chat Completions API call."""
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})
        # NOTE: messages deliberately not logged here -- prompts may be large/sensitive.

        params = {
            "model": target_model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            **extra,
        }

        if response_format == "json_object":
            # The prompt itself must also instruct the model to output JSON
            # for this to work reliably.
            params["response_format"] = {"type": "json_object"}
            logger.info("Requesting JSON object response format from OpenAI.")
        elif response_format not in (None, "text"):
            logger.warning(f"Unsupported response_format '{response_format}' for OpenAI, using default.")
        return params

    def _process_completion(self, completion: Any, max_tokens: int) -> str:
        """Extracts, validates, and logs the text content of an API response.

        Args:
            completion: The ChatCompletion object returned by the API.
            max_tokens: The request's token limit (used only for log context).

        Returns:
            The stripped completion text; empty string when the content was
            null or removed by the content filter.

        Raises:
            LLMError: If the response contained no choices.
        """
        if not completion.choices:
            raise LLMError("OpenAI API response contained no choices.", provider="OpenAI")

        first_choice = completion.choices[0]
        if not first_choice.message or first_choice.message.content is None:
            # Content can legitimately be None (e.g. filtered responses).
            logger.warning(f"OpenAI API response message or content was null. Finish reason: {first_choice.finish_reason}")
            content = ""
        else:
            content = first_choice.message.content

        finish_reason = first_choice.finish_reason

        # Log token usage when the API reported it.
        usage_info = "Usage info not available."
        if completion.usage:
            usage_info = (f"Usage: Prompt={completion.usage.prompt_tokens}, "
                          f"Completion={completion.usage.completion_tokens}, Total={completion.usage.total_tokens}")
        logger.info(f"OpenAI completion received. Finish Reason: {finish_reason}. {usage_info}")

        # Handle problematic finish reasons.
        if finish_reason == "length":
            logger.warning(f"OpenAI completion truncated due to max_tokens limit ({max_tokens}).")
        elif finish_reason == "content_filter":
            logger.error("OpenAI completion stopped due to content filter.")
            # Suppress any partial output once the filter triggered.
            content = ""

        logger.debug(f"LLM Response Content (length {len(content)}): {(content[:500] + '...') if len(content) > 500 else content}")
        return content.strip()

    async def get_completion(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        model: Optional[str] = None,
        max_tokens: int = 2000,
        temperature: float = 0.2,
        response_format: Optional[Literal["text", "json_object"]] = None,
        **kwargs: Any
    ) -> str:
        """Gets a completion using the OpenAI Chat Completions API.

        Args:
            prompt: The user message to send.
            system_prompt: Optional system message prepended to the chat.
            model: Model name; falls back to the agent.default_llm_model
                config value, then "gpt-3.5-turbo".
            max_tokens: Maximum completion tokens to generate.
            temperature: Sampling temperature.
            response_format: "json_object" to request JSON mode; "text"/None
                for the default. Other values are ignored with a warning.
            **kwargs: Extra parameters forwarded verbatim to the API call.

        Returns:
            The stripped completion text (may be empty if filtered/null).

        Raises:
            ConfigError: On authentication failure (bad/missing API key).
            LLMError: On rate limits, connection/API errors, empty responses,
                or any other failure talking to OpenAI.
        """
        target_model = model or get_config_value("agent.default_llm_model", "gpt-3.5-turbo")
        logger.info("Requesting completion from OpenAI model: %s", target_model)

        request_params = self._build_request_params(
            prompt, system_prompt, target_model, max_tokens, temperature, response_format, kwargs
        )

        try:
            # Log params except the potentially large messages list.
            log_params = {k: v for k, v in request_params.items() if k != 'messages'}
            logger.debug("Sending request to OpenAI ChatCompletion API with params: %s", log_params)
            completion = await self.client.chat.completions.create(**request_params)
            return self._process_completion(completion, max_tokens)

        # Error handling -- order matters: subclasses before their bases.
        except AuthenticationError as e:
            logger.error(f"OpenAI Authentication Error: {e}", exc_info=False)
            raise ConfigError(f"OpenAI Authentication Failed: {e}. Check API key.") from e
        except RateLimitError as e:
            logger.error(f"OpenAI Rate Limit Error: {e}", exc_info=False)
            raise LLMError(f"OpenAI rate limit exceeded: {e}", provider="OpenAI") from e
        except APIConnectionError as e:
            logger.error(f"OpenAI API Connection Error: {e}", exc_info=False)
            raise LLMError(f"Failed to connect to OpenAI API: {e}", provider="OpenAI") from e
        except APIError as e:
            # Safely access status_code using getattr (not all APIErrors carry one).
            status_code = getattr(e, 'status_code', 'Unknown')
            logger.error(f"OpenAI API Error (Status {status_code}): {e}", exc_info=False)
            raise LLMError(f"OpenAI API error (Status {status_code}): {e}", provider="OpenAI") from e
        except OpenAIError as e:  # Catch any other OpenAI specific errors
            logger.error(f"OpenAI Library Error: {e}", exc_info=True)
            raise LLMError(f"OpenAI library error: {e}", provider="OpenAI") from e
        except LLMError:
            # Already a well-formed domain error (e.g. empty choices list from
            # _process_completion) -- re-raise as-is instead of double-wrapping
            # it in the generic "Unexpected error" message below.
            raise
        except Exception as e:
            logger.error(f"Unexpected error during OpenAI API call: {e}", exc_info=True)
            raise LLMError(f"Unexpected error communicating with OpenAI: {e}", provider="OpenAI") from e