# WenXin (ERNIE Bot) API access

from typing import Any, Optional, Sequence
from openai.types.chat.chat_completion import (
    Choice
)
from llama_index.llms.openai_like import OpenAILike
from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
)
from llama_index.llms.openai.utils import (
    from_openai_message,
    to_openai_message_dicts,
)
from openai._utils import maybe_transform
from openai.types.chat import (
    ChatCompletionChunk,
    ChatCompletion,
    ChatCompletionMessage,
    completion_create_params,
)
from openai._base_client import make_request_options
from openai._streaming import Stream


class WenXinLLM(OpenAILike):
    """LLM adapter for Baidu WenXin (ERNIE Bot) exposed through llama-index.

    Sends chat requests to the wenxinworkshop ``chat/completions_pro``
    endpoint and repackages the WenXin payload into an OpenAI
    ``ChatCompletion`` so the rest of the llama-index stack can consume
    it unchanged.
    """

    def __init__(
        self,
        api_base: str = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro",
        is_chat_model: bool = True,
        api_key: Optional[str] = None,
        default_headers: Optional[dict] = None,
        **kwargs: Any,
    ) -> None:
        """Create the client.

        Args:
            api_base: WenXin completions endpoint URL.
            is_chat_model: Forwarded to ``OpenAILike``; WenXin is chat-only.
            api_key: Baidu access token (sent as the ``access_token``
                query parameter, not as a bearer header).
            default_headers: Extra HTTP headers; defaults to a JSON
                Content-Type header.
            **kwargs: Passed through to ``OpenAILike``.
        """
        # Bug fix: the fallback must be applied BEFORE super().__init__();
        # the original assigned it afterwards, so it never took effect.
        if default_headers is None:
            default_headers = {'Content-Type': 'application/json'}
        super().__init__(
            model="ERNIE-Bot 4.0",
            api_key=api_key,
            api_base=api_base,
            is_chat_model=is_chat_model,
            default_headers=default_headers,
            **kwargs,
        )

    def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Send *messages* to WenXin and return a llama-index ``ChatResponse``.

        Two prompt shapes are handled:
        * two messages (system + user): the system prompt is moved into the
          WenXin-specific ``system`` body field, since WenXin does not accept
          a system role inside ``messages``;
        * otherwise (llama-index "refine" prompt): the English refine
          preamble is replaced with a Chinese equivalent, because the model
          follows English instruction preambles poorly.

        Raises:
            KeyError: if the WenXin response carries no ``result`` field.
        """
        client = self._get_client()
        message_dicts = to_openai_message_dicts(messages)
        if len(messages) == 2:
            # System prompt travels in the request body, not in messages.
            _message = [message_dicts[1]]
            _system_prompt = message_dicts[0]["content"]
        else:
            # The model's English-preamble comprehension is weak; swap the
            # English refine instructions for a Chinese rendition.
            # NOTE: this string is sent to the model at runtime — keep as-is.
            require = """
            你是一个专家问答系统，在提炼现有答案时，严格按照两种模式运行：
            1.使用新的上下文重写原始答案。
            2.如果新的上下文没有用处，重复原来的答案。
            不要在你的答案中直接引用原始答案或上下文。
            如果有疑问，只需重复原来的答案。
            """
            _message = message_dicts
            content = _message[0]["content"]
            # Robustness fix: only rewrite when the refine-template marker is
            # present; the original split(...)[1] raised IndexError otherwise.
            marker = "answer.New"
            if marker in content:
                content = require + content.split(marker, 1)[1]
                content = content.replace("Query", "问题")
                content = content.replace("Original Answer", "原始答案")
                content = content.replace("New Answer", "新答案")
                _message[0]["content"] = content
            _system_prompt = None

        raw = client.post(
            "/chat/completions_pro",
            body=maybe_transform(
                {
                    "messages": _message,
                    "model": self.model,
                    "stream": False,
                    "temperature": 0.1,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=None,
                # WenXin authenticates via an access_token query parameter.
                extra_query={"access_token": self.api_key},
                extra_body={
                    "disable_search": False,
                    "enable_citation": False,
                    "system": _system_prompt,
                },
                timeout=60,
            ),
            cast_to=ChatCompletion,
            stream=False,
            stream_cls=Stream[ChatCompletionChunk],
        )

        # Rebuild a well-formed OpenAI ChatCompletion from the WenXin payload.
        # Fix: dropped the misspelled ``model_confg`` kwarg and the pydantic
        # internals (model_fields, model_fields_set, ...) the original passed
        # as constructor arguments — they are not model fields and were
        # silently absorbed as extra attributes.
        completion = ChatCompletion(
            id=raw.id,
            choices=[
                Choice(
                    finish_reason="stop",
                    index=0,
                    logprobs=None,
                    message=ChatCompletionMessage(
                        # WenXin returns the answer text under "result".
                        content=raw.model_extra["result"],
                        role="assistant",
                    ),
                )
            ],
            created=raw.created,
            model=self.model,
            object="chat.completion",
            system_fingerprint=None,
            usage=raw.usage,
        )

        message = from_openai_message(completion.choices[0].message)
        return ChatResponse(
            message=message,
            raw=completion,
            additional_kwargs=self._get_response_token_counts(completion),
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "WenXinLLM"

