import asyncio
import os
import httpx
from loguru import logger
from dotenv import load_dotenv

# Pull variables from a local .env file (if one exists) into the process env.
load_dotenv()

# Fail fast at import time when the API key is not configured, rather than
# failing later on the first request.
API_KEY = os.getenv("OPENROUTER_API_KEY")
if not API_KEY:
    _error_message = (
        "OPENROUTER_API_KEY environment variable is required. "
        "Please set it in your .env file or environment."
    )
    raise ValueError(_error_message)

# Pre-built Authorization header value shared by every request.
TOKEN = "Bearer " + API_KEY


async def _send_request(
    messages: list[dict[str, str]],
    model: str = "openai/gpt-4.1",
    response_format: str | None = None,
    plugins: list[dict[str, str | dict]] | None = None,
    client: httpx.AsyncClient | None = None,
    timeout: float = 600.0,
) -> str:
    """
    Send a chat-completion request to the OpenRouter API.

    Transient failures (``httpx.TimeoutException`` and ``httpx.HTTPStatusError``)
    are retried with exponential backoff; any other exception propagates to the
    caller immediately.

    Args:
        messages: The messages to send in the request.
        model: The model to use for the request (default: "openai/gpt-4.1").
        response_format: Optional JSON schema for the response (default: None).
        plugins: Optional list of plugins to use (default: None).
        client: An optional httpx.AsyncClient instance to use for the request.
            If None, a short-lived client is created for each attempt.
        timeout: Request timeout in seconds (default: 600.0).

    Returns:
        The content of the response message (first choice).

    Raises:
        httpx.TimeoutException: If all retries time out.
        httpx.HTTPStatusError: If all retries return an error status.
    """
    # Retry configuration
    max_retries = 3
    retry_delay = 1.0  # base delay in seconds; doubled on each retry

    payload: dict = {
        "model": model,
        "messages": messages,
    }
    if response_format:
        payload["response_format"] = response_format
    if plugins:
        payload["plugins"] = plugins

    args = {
        "url": "https://openrouter.ai/api/v1/chat/completions",
        "headers": {"Authorization": TOKEN, "Content-Type": "application/json"},
        "json": payload,
        # Request-level timeout applies whether the client is shared or ephemeral.
        "timeout": httpx.Timeout(timeout),
    }

    last_exception: Exception | None = None

    for attempt in range(max_retries + 1):
        try:
            if client is None:
                # Use a distinct local name for the short-lived client.
                # Rebinding the `client` parameter here (as the original code
                # did) left a *closed* client bound to it, so every retry of
                # this path would post on a closed client and fail.
                async with httpx.AsyncClient() as ephemeral_client:
                    response = await ephemeral_client.post(**args)
            else:
                response = await client.post(**args)
            response.raise_for_status()
            data = response.json()
            return data["choices"][0]["message"]["content"]
        # Only retry on TimeoutException and HTTPStatusError; anything else
        # (connection setup errors, malformed JSON, missing response keys)
        # is not transient and propagates immediately.
        except (httpx.TimeoutException, httpx.HTTPStatusError) as e:
            last_exception = e
            if attempt < max_retries:
                wait_time = retry_delay * (2**attempt)  # Exponential backoff
                logger.warning(
                    f"Failed on attempt {attempt + 1}/{max_retries + 1} due to {type(e).__name__}, "
                    f"retrying in {wait_time}s..."
                )
                await asyncio.sleep(wait_time)

    # All retries exhausted: surface the last transient error.
    if last_exception:
        raise last_exception

    # Unreachable in practice; present to satisfy type checkers.
    raise RuntimeError("Unexpected error: no exception was raised but no result was returned")


async def _send_async_requests(
    batch_messages: list[list[dict[str, str]]],
    model: str = "openai/gpt-4.1",
    response_format: str | None = None,
    plugins: list[dict[str, str | dict]] | None = None,
    max_concurrent: int = 5,
    timeout: float = 600.0,  # Set a larger timeout than the default as LLMs can take longer to respond
) -> list[str | None]:
    """
    Send multiple requests concurrently and return results as a list.

    Args:
        batch_messages: List of message lists to send
        model: The model to use for the request (default: "openai/gpt-4.1").
        response_format: Optional JSON schema for the response (default: None).
        plugins: Optional list of plugins to use (default: None).
        max_concurrent: Maximum number of concurrent requests
        timeout: Request timeout in seconds (default: 600.0).

    Returns:
        List of response contents in the same order as input. Failed requests
        are represented by ``None`` (not dropped), so indices always line up
        with ``batch_messages``.
    """
    # Use a semaphore to limit concurrent in-flight requests.
    semaphore = asyncio.Semaphore(max_concurrent)

    async def _send_with_semaphore(messages: list[dict[str, str]]) -> str:
        async with semaphore:
            # `client` is the shared AsyncClient bound by the `async with`
            # below; all tasks are created and awaited while it is open.
            return await _send_request(
                messages,
                model=model,
                response_format=response_format,
                plugins=plugins,
                client=client,
                timeout=timeout,
            )

    # Share one client (and its connection pool) across all requests.
    async with httpx.AsyncClient(timeout=httpx.Timeout(timeout)) as client:
        tasks = [_send_with_semaphore(messages) for messages in batch_messages]
        results = await asyncio.gather(*tasks, return_exceptions=True)

    # Map failures to None placeholders so the output length and order match
    # the input (the original comment claimed failures were "skipped", which
    # contradicted both the code and the declared return type).
    processed_results: list[str | None] = []
    for result in results:
        if isinstance(result, Exception):
            logger.error(f"Request failed with error: {type(result).__name__}: {str(result)}")
            processed_results.append(None)
        else:
            processed_results.append(result)

    return processed_results


def send_async_requests(
    batch_messages: list[list[dict[str, str]]],
    model: str = "openai/gpt-4.1",
    response_format: str | None = None,
    plugins: list[dict[str, str | dict]] | None = None,
    max_concurrent: int = 5,
    timeout: float = 600.0,
) -> list[str | None]:
    """
    Wrapper to handle the async message processing synchronously.

    Each of the elements in `batch_messages` will be sent as a separate request
    to the API.

    Args:
        batch_messages: List of message lists to send
        model: The model to use for the request (default: "openai/gpt-4.1").
        response_format: Optional JSON schema for the response (default: None).
        plugins: Optional list of plugins to use (default: None).
        max_concurrent: Maximum number of concurrent requests
        timeout: Request timeout in seconds (default: 600.0).

    Returns:
        List of response contents in input order; ``None`` for failed requests.
    """
    # The original docstring documented `timeout` but the parameter was
    # missing, so callers could not customize it; it is now accepted and
    # forwarded (default unchanged, so existing callers are unaffected).
    return asyncio.run(
        _send_async_requests(
            batch_messages,
            model=model,
            response_format=response_format,
            plugins=plugins,
            max_concurrent=max_concurrent,
            timeout=timeout,
        )
    )
