# services/llm_service.py
# Encapsulates LiteLLM calling logic, handles LLM API interactions, and manages different model configurations.
# Exposes LLMService.call_llm_api(), an async wrapper around litellm.acompletion with optional per-call proxy handling.

import litellm
import httpx
import os # Added for os.environ
import json # Added for pretty printing messages
from typing import Union, Optional, Dict, Literal

class LLMService:
    def __init__(self):
        print("LLMService initialized.")
        # Enable LiteLLM verbose logging for debugging via environment variable
        os.environ['LITELLM_LOG'] = 'DEBUG'
        print("LiteLLM debug logging enabled via LITELLM_LOG environment variable.")
        # Potentially initialize other LiteLLM settings here if needed globally

    async def call_llm_api(self, messages: list, model_config: dict) -> str | None:
        model_name = model_config.get("model")
        if not model_name:
            print("LLMService error: 'model' not found in model_config.")
            return "Error: Model not specified."

        try:
            # Prepare parameters for litellm.acompletion
            # messages are passed directly to acompletion, not part of **model_config
            litellm_call_kwargs = model_config.copy()

            proxy_config_value = litellm_call_kwargs.pop("proxy_config", None)
            
            # httpx.AsyncClient arguments
            client_init_args = None
            if proxy_config_value is not None:
                client_init_args = dict()
                if proxy_config_value == "NO_PROXY":
                    client_init_args["proxy"] = None
                    client_init_args["trust_env"] = False
                elif isinstance(proxy_config_value,str):
                    client_init_args["proxy"] = proxy_config_value
                else:
                    print(f"Invalid proxy_config: {proxy_config_value}")
            # If proxy_config_value is None, or any other unhandled value
            # we will not create a custom client here based on user's decision to rely on env for specific proxies.
            # LiteLLM will use its default client which respects environment variables.

            if client_init_args:
                # Explicitly create and manage an httpx.AsyncClient for "NO_PROXY" or URL
                async with httpx.AsyncClient(**client_init_args) as client:
                    response = await litellm.acompletion(messages=messages, client=client, **litellm_call_kwargs)
            else:
                # Let LiteLLM handle client creation (will use env vars by default if proxy_config_value was None or unhandled)
                print(f"LLMService: Calling litellm.acompletion with model: {model_name}")
                print(f"LLMService: Messages being sent to LiteLLM:\n{json.dumps(messages, ensure_ascii=False, indent=2)}")
                print(f"LLMService: Other kwargs for LiteLLM: {litellm_call_kwargs}")
                response = await litellm.acompletion(messages=messages, **litellm_call_kwargs)

            # Extract content
            # LiteLLM's response structure can vary slightly based on the model/provider.
            # The most common path is response.choices[0].message.content
            # Sometimes it might be response['choices'][0]['message']['content'] if it's a dict.
            # LiteLLM aims to standardize this to ModelResponse.choices[0].message.content
            if response and response.choices and response.choices[0].message:
                content = response.choices[0].message.content
                print(f"LLMService received response from {model_name}.")
                return content
            else:
                print(f"LLMService error: Unexpected response structure from {model_name}: {response}")
                return "Error: Unexpected response structure from LLM."

        except litellm.exceptions.APIError as e:
            print(f"LLMService APIError for model {model_name}: {e}")
            return f"Error: LLM API error ({e.status_code}): {e.message}"
        except litellm.exceptions.RateLimitError as e:
            print(f"LLMService RateLimitError for model {model_name}: {e}")
            return "Error: LLM rate limit exceeded."
        except litellm.exceptions.AuthenticationError as e:
            print(f"LLMService AuthenticationError for model {model_name}: {e}")
            return "Error: LLM authentication failed. Check API key."
        except litellm.exceptions.BadRequestError as e: # Added for more specific input errors
            print(f"LLMService BadRequestError for model {model_name}: {e}")
            return f"Error: Bad request to LLM API ({e.status_code}): {e.message}"
        except litellm.exceptions.Timeout as e: # Added for timeout errors
            print(f"LLMService Timeout for model {model_name}: {e}")
            return "Error: LLM API call timed out."
        except Exception as e:
            print(f"LLMService an unexpected error occurred for model {model_name}: {e}")
            # Consider logging the full traceback here for debugging
            # import traceback
            # traceback.print_exc()
            return f"Error: An unexpected error occurred with the LLM call: {str(e)}"

if __name__ == '__main__':
    import asyncio
    # NOTE: `os` is already imported at module level; no re-import needed here.

    service = LLMService()

    async def test_llm_call():
        """Exercise LLMService: one local-model call plus two error paths."""
        print("\n--- Running LLMService Test ---")
        # IMPORTANT: To run this test, you might need to set an API key environment variable.
        # For example, for OpenAI models: export OPENAI_API_KEY="your_key_here"
        # Or, provide it directly in model_config if your setup allows.

        # Test case 1: Successful call (assuming a local Ollama server hosts this model)
        print("\nTest Case 1: Attempting call to ollama/gemma3:27b-it-q8_0...")
        model_config_gpt = {
            "model": "ollama/gemma3:27b-it-q8_0",
            # "api_key": os.environ.get("OPENAI_API_KEY"),  # LiteLLM usually picks this up automatically
            "temperature": 0.5,
            "max_tokens": 50
        }
        messages_gpt = [{"role": "user", "content": "Hello, world! Tell me a short joke."}]

        response_gpt = await service.call_llm_api(messages_gpt, model_config_gpt)
        print(f"Response from ollama/gemma3:27b-it-q8_0: {response_gpt}")

        # Test case 2: Model not specified -> early-return error string
        print("\nTest Case 2: Model not specified...")
        model_config_no_model = {}
        messages_no_model = [{"role": "user", "content": "Test"}]
        response_no_model = await service.call_llm_api(messages_no_model, model_config_no_model)
        print(f"Response (model not specified): {response_no_model}")

        # Test case 3: Non-existent model -> exercises exception handling
        print("\nTest Case 3: Attempting call to a potentially misconfigured model (e.g., 'nonexistent-model')...")
        model_config_bad = {
            "model": "nonexistent-model-for-testing",
            "temperature": 0.7
        }
        messages_bad = [{"role": "user", "content": "This should fail."}]
        response_bad = await service.call_llm_api(messages_bad, model_config_bad)
        print(f"Response from nonexistent-model: {response_bad}")

        # Other providers work the same way, e.g. {"model": "claude-3-haiku-20240307"}
        # with ANTHROPIC_API_KEY set in the environment.

        print("\n--- LLMService Test Complete ---")

    # To run the test:
    #   pip install litellm
    #   Set the relevant provider API key (OPENAI_API_KEY, ANTHROPIC_API_KEY, ...) if needed.
    try:
        asyncio.run(test_llm_call())
    except Exception as e:
        # The old `except ImportError` clause was unreachable: the imports above
        # execute before this try block, so an ImportError would already have
        # propagated. A single broad handler at this script boundary suffices.
        print(f"An error occurred during the test run: {e}")