"""
LLM适配器模块

实现各种LLM适配器，用于兼容不同的API格式。
"""

import json
import time
from typing import Optional, Any

from llama_index.core.llms import CustomLLM
from llama_index.core.llms import (
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
import requests


class CustomOpenAIAPIAdapter(CustomLLM):
    """
    Fully custom OpenAI-compatible API adapter using direct HTTP requests.

    Supports arbitrary model names, bypassing LlamaIndex's model-name
    validation. Works against any endpoint implementing the OpenAI
    ``/chat/completions`` protocol.
    """

    # Required field declarations (CustomLLM is a pydantic model).
    context_window: int = 4096
    num_output: int = 1024
    model_name: str = "custom"

    def __init__(
        self,
        api_key: str,
        api_base: str,
        model_name: str,
        timeout: int = 60,
        max_retries: int = 3,
        temperature: float = 0.1,
        max_tokens: Optional[int] = None
    ):
        """
        Args:
            api_key: Bearer token sent in the Authorization header.
            api_base: Base URL of the OpenAI-compatible endpoint
                (``/chat/completions`` is appended to it).
            model_name: Arbitrary model identifier, forwarded verbatim.
            timeout: Per-request timeout in seconds.
            max_retries: Number of attempts before giving up. Values < 1 are
                clamped to 1 so at least one request is always made.
            temperature: Default sampling temperature.
            max_tokens: Default completion-token limit (None = server default).
        """
        super().__init__()
        # Underscore-prefixed attributes skip pydantic field validation.
        self._api_key = api_key
        self._api_base = api_base
        self.model_name = model_name
        self._timeout = timeout
        # Clamp: with max_retries <= 0 the retry loop would never run and
        # complete() would silently return None instead of a response.
        self._max_retries = max(1, max_retries)
        self._temperature = temperature
        self._max_tokens = max_tokens
        self.num_output = max_tokens or 1024

    def _headers(self) -> dict:
        """Build the HTTP headers shared by all requests."""
        return {
            "Authorization": f"Bearer {self._api_key}",
            "Content-Type": "application/json"
        }

    def _payload(self, prompt: str, stream: bool, **kwargs: Any) -> dict:
        """Build the chat-completions request body, applying per-call overrides."""
        return {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": kwargs.get("temperature", self._temperature),
            "max_tokens": kwargs.get("max_tokens", self._max_tokens),
            "stream": stream
        }

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """
        Generate a completion for *prompt* (non-streaming).

        Retries with exponential backoff (2**attempt seconds) on any error,
        including malformed response bodies.

        Args:
            prompt: Input prompt.
            **kwargs: Optional ``temperature`` / ``max_tokens`` overrides.

        Returns:
            CompletionResponse: The generated response.

        Raises:
            Exception: After the final retry fails; the underlying error is
                attached as ``__cause__``.
        """
        last_error: Optional[Exception] = None
        for attempt in range(self._max_retries):
            try:
                response = requests.post(
                    f"{self._api_base}/chat/completions",
                    headers=self._headers(),
                    json=self._payload(prompt, stream=False, **kwargs),
                    timeout=self._timeout
                )
                response.raise_for_status()
                result = response.json()
                text = result["choices"][0]["message"]["content"]
                return CompletionResponse(text=text)
            # Broad on purpose: also retries on malformed JSON / missing keys.
            except Exception as e:
                last_error = e
                if attempt < self._max_retries - 1:
                    time.sleep(2 ** attempt)  # exponential backoff
        # Chain the cause so the original HTTP/parse error is not lost.
        raise Exception(f"LLM API调用失败: {last_error}") from last_error

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        """
        Generate a completion for *prompt*, streaming SSE deltas.

        NOTE: if a retry happens after some chunks were already yielded, the
        stream restarts with a fresh request; the accumulated text buffer is
        reset per attempt so previously streamed content is not duplicated
        in ``CompletionResponse.text``.

        Args:
            prompt: Input prompt.
            **kwargs: Optional ``temperature`` / ``max_tokens`` overrides.

        Yields:
            CompletionResponse: Cumulative text so far plus the new delta.

        Raises:
            Exception: After the final retry fails; the underlying error is
                attached as ``__cause__``.
        """
        for attempt in range(self._max_retries):
            # Reset per attempt — accumulating across retries (as the original
            # code did) duplicates text after a mid-stream failure.
            response_text = ""
            try:
                response = requests.post(
                    f"{self._api_base}/chat/completions",
                    headers=self._headers(),
                    json=self._payload(prompt, stream=True, **kwargs),
                    timeout=self._timeout,
                    stream=True
                )
                response.raise_for_status()

                for raw_line in response.iter_lines():
                    if not raw_line:
                        continue
                    line = raw_line.decode('utf-8')
                    if not line.startswith("data: "):
                        continue
                    line = line[6:]  # strip the SSE "data: " prefix
                    if line.strip() == "[DONE]":
                        break
                    try:
                        chunk = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # skip keep-alives / partial lines
                    choices = chunk.get("choices") or []
                    if not choices:
                        continue
                    token = choices[0].get("delta", {}).get("content")
                    # Some servers send null content (e.g. role-only deltas);
                    # skip those to avoid str + None concatenation errors.
                    if token is not None:
                        response_text += token
                        yield CompletionResponse(text=response_text, delta=token)
                break  # stream finished successfully
            except Exception as e:
                if attempt == self._max_retries - 1:
                    # Chain the cause so the original error is not lost.
                    raise Exception(f"LLM流式API调用失败: {e}") from e
                time.sleep(2 ** attempt)  # exponential backoff

    @property
    def metadata(self) -> LLMMetadata:
        """LLM metadata advertised to LlamaIndex."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=self.num_output,
            model_name=self.model_name,
        )