import os
from typing import Any, Callable, Dict, List, Optional, Union

from haystack.components.generators.openai_utils import (
    _convert_message_to_openai_format,
)
from haystack.dataclasses import ChatMessage, StreamingChunk
from litellm import acompletion
from litellm.types.utils import ModelResponse
import backoff

from src.core.provider import LLMProvider
from src.providers.llm import (
    build_chunk,
    build_message,
    check_finish_reason,
    connect_chunks,
)
from src.providers.loader import provider
from src.utils import remove_trailing_slash


@provider("litellm_llm")
class LitellmLLMProvider(LLMProvider):
    """LLM provider that routes chat completions through LiteLLM's async API.

    The API key is read from the ``OPENAI_API_KEY`` environment variable;
    LiteLLM resolves the actual backend from the model string (e.g. OpenAI,
    Azure, local proxies).
    """

    def __init__(
        self,
        model: str,
        api_base: Optional[str] = None,
        api_version: Optional[str] = None,
        kwargs: Optional[Dict[str, Any]] = None,
        timeout: float = 600.0,  # request timeout in seconds (10 minutes)
        **_,
    ):
        """
        :param model: LiteLLM model identifier (e.g. ``"gpt-4o"``).
        :param api_base: Optional API base URL; a trailing slash is stripped.
        :param api_version: Optional API version (used by e.g. Azure).
        :param kwargs: Provider-level generation kwargs merged into every call.
        :param timeout: Per-request timeout in seconds.
        """
        self._model = model
        self._api_key = os.environ.get("OPENAI_API_KEY")
        self._api_base = remove_trailing_slash(api_base) if api_base else None
        self._api_version = api_version
        # BUGFIX: normalize to an empty dict — the original stored None when
        # `kwargs` was omitted, and `**self._model_kwargs` in get_generator()
        # then raised "TypeError: argument ... is not a mapping".
        self._model_kwargs = kwargs or {}
        self._timeout = timeout

    def get_generator(
        self,
        system_prompt: Optional[str] = None,
        generation_kwargs: Optional[Dict[str, Any]] = None,
        streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
    ):
        """Build an async generation callable bound to this provider.

        :param system_prompt: Optional system message prepended to every call.
        :param generation_kwargs: Generator-level defaults; provider-level
            ``self._model_kwargs`` take precedence over these.
        :param streaming_callback: When given, the response is streamed and
            the callback is invoked once per chunk (with the query_id).
        :return: Async callable ``_run(prompt, generation_kwargs, query_id)``
            returning ``{"replies": [str, ...], "meta": [dict, ...]}``.
        """
        combined_generation_kwargs = {
            **(generation_kwargs or {}),
            **self._model_kwargs,
        }

        async def _run(
            prompt: str,
            generation_kwargs: Optional[Dict[str, Any]] = None,
            query_id: Optional[str] = None,
        ):
            message = ChatMessage.from_user(prompt)
            if system_prompt:
                messages = [ChatMessage.from_system(system_prompt), message]
            else:
                messages = [message]

            openai_formatted_messages = [
                _convert_message_to_openai_format(m) for m in messages
            ]

            # Per-call kwargs override the generator-level defaults.
            generation_kwargs = {
                **combined_generation_kwargs,
                **(generation_kwargs or {}),
            }

            # Remove response_format completely to avoid format issues.
            generation_kwargs.pop("response_format", None)

            # Extract common parameters so they are passed explicitly.
            temperature = generation_kwargs.pop("temperature", 0.7)
            max_tokens = generation_kwargs.pop("max_tokens", 100)

            streaming = streaming_callback is not None
            # BUGFIX: validate n *before* issuing the request. The original
            # code only checked after `await acompletion(...)` had already
            # been made with n in the payload, wasting a (billable) call.
            if streaming and generation_kwargs.get("n", 1) > 1:
                raise ValueError(
                    "Cannot stream multiple responses, please set n=1."
                )

            # Retry transient failures with exponential backoff.
            @backoff.on_exception(
                backoff.expo,
                Exception,  # retry on any exception
                max_tries=3,  # at most 3 attempts
                max_time=900,  # stop retrying after 15 minutes total
                giveup=lambda e: False,  # never short-circuit a retry
            )
            async def _call_with_retry():
                return await acompletion(
                    model=self._model,
                    api_key=self._api_key,
                    api_base=self._api_base,
                    api_version=self._api_version,
                    timeout=self._timeout,
                    messages=openai_formatted_messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    stream=streaming,
                    **generation_kwargs,
                )

            # A ModelResponse when not streaming; an async iterator of chunks
            # when streaming (so no single accurate static annotation).
            completion = await _call_with_retry()
            completions: List[ChatMessage] = []
            if streaming:
                generation_kwargs.pop("n", None)  # already validated above
                chunks: List[StreamingChunk] = []

                # NOTE(review): if the stream yields no chunks, `chunk` below
                # is unbound (pre-existing behavior) — confirm upstream
                # guarantees at least one chunk.
                async for chunk in completion:
                    if chunk.choices:
                        chunk_delta: StreamingChunk = build_chunk(chunk)
                        chunks.append(chunk_delta)
                        # Invoke the caller's callback with each delta.
                        streaming_callback(chunk_delta, query_id)
                completions = [connect_chunks(chunk, chunks)]
            else:
                completions = [
                    build_message(completion, choice)
                    for choice in completion.choices
                ]

            # Post-processing: surface truncation / abnormal finish reasons.
            for response in completions:
                check_finish_reason(response)

            return {
                "replies": [m.content for m in completions],
                "meta": [m.meta for m in completions],
            }

        return _run