import openai
from flask import current_app

def get_llm_completion(messages_history, model=None, temperature=0.7, max_tokens=1024):
    """
    Send a chat-completion request to an OpenAI-compatible API.

    :param messages_history: List of message dicts, e.g.,
        [{"role": "user", "content": "Hello"}]
    :param model: Model name (e.g., "gpt-3.5-turbo"). When falsy, falls back to
        the 'llm_model' SystemSetting row, then to config DEFAULT_LLM_MODEL.
    :param temperature: Controls randomness (0.0 to 2.0)
    :param max_tokens: Max tokens in the response
    :return: Stripped assistant response content on success, or an
        "Error: ..." string describing the failure. (Never returns None.)
    """
    api_key = current_app.config.get('OPENAI_API_KEY')
    api_base = current_app.config.get('OPENAI_API_BASE')

    # Model resolution order: explicit argument > DB setting > config default.
    if not model:
        from app.models import SystemSetting  # Local import to avoid circular import
        setting_model = SystemSetting.query.filter_by(key='llm_model').first()
        model = setting_model.value if setting_model else current_app.config.get('DEFAULT_LLM_MODEL')

    client = openai.OpenAI(api_key=api_key, base_url=api_base)

    try:
        # NOTE(review): this logs the full conversation at INFO level — may
        # expose user content in logs; confirm that is acceptable.
        current_app.logger.info(f"Sending to LLM: {messages_history} with model {model}")
        response = client.chat.completions.create(
            model=model,
            messages=messages_history,
            temperature=temperature,
            max_tokens=max_tokens
        )
        # message.content can be None (e.g. tool-call responses); guard so
        # .strip() below cannot raise AttributeError on a successful call.
        assistant_response = response.choices[0].message.content or ""
        current_app.logger.info(f"LLM Response: {assistant_response}")
        return assistant_response.strip()
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        current_app.logger.exception(f"Error interacting with LLM: {e}")
        # Returned as a string so callers can surface it directly to the user.
        return f"Error: Could not get response from LLM. {str(e)}"