# Tongyi Qianwen (DashScope) API access
import time
from typing import Any, Optional, Sequence
from openai.types.chat.chat_completion import (
    Choice
)
from llama_index.llms.openai_like import OpenAILike
from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
)
from llama_index.llms.openai.utils import (
    from_openai_message,
    to_openai_message_dicts,
)
from openai.types.chat import (
    ChatCompletionChunk,
    ChatCompletion,
    ChatCompletionMessage,
)
from openai._base_client import make_request_options
from openai._streaming import Stream


class QianWenLLM(OpenAILike):
    """OpenAILike wrapper for the Tongyi Qianwen (DashScope) generation API.

    DashScope's native endpoint wraps the request in ``input``/``parameters``
    and returns the payload under ``output`` instead of the OpenAI schema.
    ``_chat`` is overridden to post in that shape and to rebuild a standard
    OpenAI ``ChatCompletion`` from the response so the rest of the
    llama-index stack can consume it unchanged.
    """

    def __init__(
        self,
        model: Optional[str] = None,
        api_base: str = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation",
        is_chat_model: bool = True,
        api_key: Optional[str] = None,
        default_headers: Optional[dict] = None,
        **kwargs: Any,
    ) -> None:
        """Configure the client.

        Args:
            model: DashScope model name (e.g. ``qwen-turbo``).
            api_base: DashScope text-generation endpoint URL.
            is_chat_model: Passed through to ``OpenAILike``.
            api_key: DashScope API key, sent as a Bearer token.
            default_headers: Extra HTTP headers; defaults to a JSON
                content type when not supplied.
        """
        # Bug fix: the fallback headers must be filled in BEFORE they are
        # handed to the parent constructor. The original code assigned the
        # default after super().__init__(), which had no effect.
        if default_headers is None:
            default_headers = {'Content-Type': 'application/json'}
        super().__init__(
            model=model,
            api_key=api_key,
            api_base=api_base,
            is_chat_model=is_chat_model,
            default_headers=default_headers,
            **kwargs,
        )

    def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Send *messages* to DashScope and return a ``ChatResponse``.

        Posts the conversation in DashScope's native ``input``/``parameters``
        envelope, then re-shapes the reply into an OpenAI ``ChatCompletion``.

        Raises:
            KeyError: if the response lacks the expected ``output`` /
                ``request_id`` fields.
        """
        client = self._get_client()
        message_dicts = to_openai_message_dicts(messages)
        raw_response = client.post(
            "",
            body={
                "model": self.model,
                "input": {
                    "messages": message_dicts
                },
                "parameters": {
                    # NOTE(review): temperature is hard-coded here and
                    # ignores self.temperature — confirm this is intended.
                    "temperature": 0.1,
                    # "message" makes DashScope return OpenAI-style choices.
                    "result_format": "message"
                },
            },
            options=make_request_options(
                extra_headers={"Authorization": f"Bearer {self.api_key}"},
                extra_query=None,
                extra_body=None,
                timeout=60
            ),
            cast_to=ChatCompletion,
            stream=False,
            stream_cls=Stream[ChatCompletionChunk],
        )

        # DashScope puts the actual payload in extra fields ("output",
        # "request_id") that pydantic parks in model_extra. Rebuild a clean
        # OpenAI ChatCompletion from them. The original also passed pydantic
        # internals (model_fields, a misspelled "model_confg", ...) as
        # constructor kwargs — meaningless extras, dropped here.
        completion = ChatCompletion(
            id=raw_response.model_extra["request_id"],
            choices=[Choice(
                finish_reason="stop",
                index=0,
                logprobs=None,
                message=ChatCompletionMessage(
                    content=raw_response.model_extra["output"]["choices"][0]["message"]["content"],
                    role="assistant",
                    function_call=None,
                    tool_calls=None,
                ),
            )],
            # Bug fix: "created" is a Unix timestamp in SECONDS; the
            # original used time.time_ns() (nanoseconds, off by 1e9).
            created=int(time.time()),
            model=self.model,
            object="chat.completion",
            system_fingerprint=None,
            usage=raw_response.usage,
        )
        message = from_openai_message(completion.choices[0].message)

        return ChatResponse(
            message=message,
            raw=completion,
            additional_kwargs=self._get_response_token_counts(completion),
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "QianWenLLM"

