import logging
from pathlib import Path
from typing import Optional

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI

from app.config import settings

logger = logging.getLogger(__name__)


async def optimize_with_llm(file_path: Path, language: Optional[str] = None) -> str:
    """Optimize the text content of *file_path* via the configured LLM.

    Streams the model response and returns the concatenated text. On any
    LLM failure the original file content is returned unchanged, so callers
    get a best-effort result instead of an exception.

    Args:
        file_path: Path to a UTF-8 text file whose content is sent to the LLM.
        language: Value substituted into the system prompt template
            (``settings.system_prompt``); may be ``None``.

    Returns:
        The LLM-optimized text, or the original file content if the LLM
        call fails.

    Raises:
        OSError / UnicodeDecodeError: if the file cannot be read — file
        errors are deliberately not swallowed, only LLM errors are.
    """
    content = file_path.read_text(encoding="utf-8")
    prompt = settings.system_prompt.format(language)
    user_content = settings.user_content.format(content)

    llm = ChatOpenAI(
        api_key=settings.api_key,
        base_url=settings.base_url,
        model=settings.model,
        max_retries=0,
        timeout=180,
        max_tokens=16384,
    )

    messages = [
        SystemMessage(content=prompt),
        HumanMessage(content=user_content),
    ]

    try:
        # Stream so large responses are accumulated chunk-by-chunk instead of
        # waiting on a single long completion.
        chunks = []
        async for chunk in llm.astream(messages):
            if getattr(chunk, "content", None):
                chunks.append(str(chunk.content))
        return "".join(chunks)
    except Exception:
        # Best-effort fallback: log with traceback and return the original
        # content rather than failing the whole pipeline.
        logger.exception(
            "LLM optimization failed for %s, returning original content", file_path
        )
        return content


async def describe_image_with_llm(image_path: Path, language: Optional[str] = None) -> str:
    """
    Use LLM to describe the content of an image, with a prompt in English suitable for RAG documentation.

    Streams the model response and returns the concatenated description text.

    Args:
        image_path: Path to the image file to describe.
        language: Desired output language for the description; defaults to
            English when ``None``.

    Returns:
        The image description, or an empty string if the LLM call fails.
    """
    from app.utils.image_utils import image_to_base64
    base64_str = image_to_base64(image_path)

    # English prompt for RAG
    prompt = (
        "You are an expert in enterprise knowledge base documentation. Based on the provided image, "
        "directly output a detailed description of the image content, without any formatting instructions. "
        "The description should be concise and accurate, suitable for insertion into a Retrieval-Augmented Generation (RAG) document, "
        "and optimized for downstream knowledge retrieval and question answering. "
        f"Language: {language or 'English'}."
    )

    llm = ChatOpenAI(
        api_key=settings.api_key,
        base_url=settings.base_url,
        model=settings.model,
        max_retries=0,
        timeout=180,
        max_tokens=16384,
    )

    messages = [
        SystemMessage(content=prompt),
        HumanMessage(content=[
            {"type": "text", "text": "Please describe the following image:"},
            # OpenAI's vision API expects "image_url" to be an object with a
            # "url" key, not a bare string.
            # NOTE(review): mime type is hardcoded to image/png — confirm that
            # image_to_base64 always yields PNG data, or derive the mime type
            # from image_path.suffix.
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_str}"}},
        ]),
    ]

    try:
        chunks = []
        async for chunk in llm.astream(messages):
            if getattr(chunk, "content", None):
                chunks.append(str(chunk.content))
        return "".join(chunks)
    except Exception:
        # Best-effort: log with traceback and return an empty description.
        logger.exception("LLM image description failed for %s", image_path)
        return ""
