"""
LLM factory for creating language model instances with OpenRouter support.
"""
import os
from typing import Optional, Dict, Any
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv

# Load variables from a local .env file (if present) into os.environ at
# import time, so the os.getenv() lookups below can see them.
load_dotenv()


def create_llm(
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    config: Optional[Dict[str, Any]] = None
) -> ChatOpenAI:
    """
    Create a language model instance with OpenRouter or OpenAI support.

    Priority order for configuration:
    1. Function parameters
    2. Config dictionary
    3. Environment variables
    4. Default values

    Args:
        model: Model name (e.g., 'openai/gpt-4o', 'openai/gpt-5')
        temperature: Temperature for generation (0.0-2.0). When omitted
            (None), falls back to config['temperature'], then 0.1.
        config: Optional configuration dictionary

    Returns:
        ChatOpenAI instance configured for OpenRouter or OpenAI

    Raises:
        ValueError: If neither OPENROUTER_API_KEY nor OPENAI_API_KEY is set.

    Environment Variables:
        OPENROUTER_API_KEY: API key for OpenRouter (primary)
        OPENROUTER_BASE_URL: Base URL for OpenRouter (default: https://openrouter.ai/api/v1)
        LLM_MODEL: Default model to use (default: openai/gpt-4o)
        OPENAI_API_KEY: Fallback to standard OpenAI API

    Examples:
        # Using OpenRouter with environment variables
        llm = create_llm()

        # Specifying model
        llm = create_llm(model='openai/gpt-5')

        # Using config dictionary
        llm = create_llm(config={'llm_model': 'openai/gpt-4o', 'temperature': 0.2})
    """
    config = config or {}

    # Model resolution: explicit arg > config > env var > hard default.
    final_model = (
        model
        or config.get('llm_model')
        or os.getenv('LLM_MODEL')
        or 'openai/gpt-4o'
    )

    # Temperature resolution uses None as the "not provided" sentinel so a
    # caller can pass temperature=0.1 explicitly without config silently
    # overriding it (the old `!= 0.1` check could not tell the two apart).
    final_temperature = (
        temperature if temperature is not None
        else config.get('temperature', 0.1)
    )

    # OPENROUTER_API_KEY takes precedence over a plain OpenAI key.
    openrouter_api_key = os.getenv('OPENROUTER_API_KEY')
    openrouter_base_url = os.getenv('OPENROUTER_BASE_URL', 'https://openrouter.ai/api/v1')

    if openrouter_api_key:
        # Route through OpenRouter's OpenAI-compatible endpoint.
        # The extra headers identify the app per OpenRouter's ranking docs.
        return ChatOpenAI(
            model=final_model,
            temperature=final_temperature,
            openai_api_key=openrouter_api_key,
            openai_api_base=openrouter_base_url,
            default_headers={
                "HTTP-Referer": os.getenv('OPENROUTER_REFERER', 'https://github.com/attribution-agent'),
                "X-Title": os.getenv('OPENROUTER_TITLE', 'Attribution Analysis Agent')
            }
        )

    # Fallback to standard OpenAI.
    openai_api_key = os.getenv('OPENAI_API_KEY')

    if not openai_api_key:
        raise ValueError(
            "No API key found. Please set either OPENROUTER_API_KEY or OPENAI_API_KEY "
            "in your environment variables or .env file."
        )

    # Standard OpenAI does not use the 'openai/' vendor prefix — strip it
    # from the front only (str.replace would remove every occurrence).
    if final_model.startswith('openai/'):
        final_model = final_model[len('openai/'):]

    return ChatOpenAI(
        model=final_model,
        temperature=final_temperature,
        openai_api_key=openai_api_key
    )


def get_llm_info() -> Dict[str, Any]:
    """
    Summarize the LLM configuration currently visible in the environment.

    Returns:
        Dictionary with 'provider', 'model', 'base_url', and 'api_key_set'.
    """
    router_key = os.getenv('OPENROUTER_API_KEY')
    direct_key = os.getenv('OPENAI_API_KEY')
    using_openrouter = bool(router_key)

    # OpenRouter's base URL is overridable via env; direct OpenAI is fixed.
    if using_openrouter:
        base_url = os.getenv('OPENROUTER_BASE_URL', 'https://openrouter.ai/api/v1')
    else:
        base_url = 'https://api.openai.com/v1'

    return {
        'provider': 'openrouter' if using_openrouter else 'openai',
        'model': os.getenv('LLM_MODEL', 'openai/gpt-4o'),
        'base_url': base_url,
        'api_key_set': bool(router_key or direct_key),
    }


# Explicit public API. ChatOpenAI is re-exported for backward compatibility
# with callers that previously imported it from this module.
__all__ = ['create_llm', 'get_llm_info', 'ChatOpenAI']
