import json
import logging
from collections.abc import Generator
from typing import Optional, Union, cast

import requests
import websocket
from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema

from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import (
    AIModelEntity,
    FetchFrom,
    ModelFeature,
    ModelPropertyKey,
    ModelType,
    ParameterRule,
    ParameterType,
)
from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeBadRequestError,
    InvokeConnectionError,
    InvokeError,
    InvokeRateLimitError,
    InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.picc.helper.cluster_rate_limit import ClusterRateLimit, MultipleRateLimit
from core.model_runtime.model_providers.picc.helper.request import (
    Message,
    MessageRole,
    get_request_body,
    get_request_headers,
)
from core.model_runtime.model_providers.picc.llm.picc_generate_errors import (
    BadRequestError,
    InsufficientAccountBalanceError,
    InternalServerError,
    InvalidAPIKeyError,
    InvalidAuthenticationError,
    RateLimitReachedError,
)


class PiccLargeLanguageModel(LargeLanguageModel):
    """Large language model integration for the Picc provider.

    Supports two transports, selected by the scheme of ``credentials["base_url"]``:
    - ``http(s)://`` — blocking request/response via ``requests.post``
    - ``ws(s)://``   — streaming response via ``websocket.WebSocket``

    Concurrency is throttled by a cluster-wide rate limiter combining a
    per-model limit (from credentials) and a per-application limit (from
    model parameters).
    """

    # Models that have been integrated with this provider.
    custom_model = ["deepseek_r1-32b", "deepseek_r1-70b", "deepseek_r1-671b", "qwen2.5"]
    # Maximum context length (in tokens) for each integrated model.
    modelMaxToken = {
        "deepseek_r1-32b": 60000,
        "deepseek_r1-70b": 100000,
        "deepseek_r1-671b": 30000,
        "qwen2.5": 131072,
        "default": 20000
    }

    # Reply used when no connection can be acquired from the rate limiter.
    # NOTE(review): role USER looks odd for a message emitted by the provider —
    # presumably should be ASSISTANT; kept as-is to preserve behavior. TODO confirm.
    over_rate_response = Message(
        role=MessageRole.USER,
        content="application concurrency reach the limit, try again later"
    )

    def _invoke(
        self,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: list[PromptMessageTool] | None = None,
        stop: list[str] | None = None,
        stream: bool = True,
        user: str | None = None,
    ) -> LLMResult | Generator:
        """Invoke the Picc model; delegates directly to :meth:`_generate`."""
        return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """Validate credentials for the Picc model.

        Sends a minimal probe request over the transport implied by
        ``base_url`` and checks the service answers with ``code == 0``.

        :raises CredentialsValidateFailedError: when the URL is empty, the
            probe fails, or any unexpected error occurs.
        :raises InvalidAuthenticationError: when the URL is malformed.
        """
        try:
            url = credentials["base_url"]
            app_id = credentials["app_id"]
            ability_id = credentials["ability_id"]
            secret_id = credentials["secret_id"]
            secret_key = credentials["secret_key"]
            if not url:
                raise CredentialsValidateFailedError("Invalid server URL")
            # startswith("ws") also covers "wss"; the original redundant
            # "or startswith('wss')" check is dropped.
            if url.startswith("ws"):
                headers = get_request_headers(http_method="ws",
                                              app_id=app_id,
                                              secret_id=secret_id,
                                              secret_key=secret_key,
                                              ability_id=ability_id
                                              )
                params = get_request_body(prompt_messages=[Message(role=MessageRole.USER, content="ping\nAnswer: ")],
                                          credentials=credentials,
                                          model_parameters=None,
                                          is_init=True
                                          )
                ws = websocket.WebSocket()
                try:
                    ws.connect(url=url, header=headers)
                    ws.send(json.dumps(params.to_dict()))
                    data = json.loads(ws.recv())
                finally:
                    # Bug fix: the probe socket was never closed before.
                    if ws.connected:
                        ws.close()
                if data["code"] != 0:
                    raise CredentialsValidateFailedError(f"validation error, response: {data}")
            elif url.startswith("http"):  # also covers "https"
                headers = get_request_headers(http_method="POST",
                                              app_id=app_id,
                                              secret_id=secret_id,
                                              secret_key=secret_key,
                                              ability_id=ability_id
                                              )
                params = get_request_body(prompt_messages=[Message(role=MessageRole.USER, content="如何平衡生活与工作")],
                                          credentials=credentials,
                                          model_parameters=None,
                                          is_init=True
                                          )
                response = requests.post(url, headers=headers, json=params.to_dict())
                data = json.loads(response.text)
                if data["code"] != 0:
                    raise CredentialsValidateFailedError(f"validation error, response: {data}")
        except CredentialsValidateFailedError:
            # Bug fix: do not re-wrap our own validation errors as
            # "Internal server error" via the blanket handler below.
            raise
        except (ConnectionError, InvalidSchema, MissingSchema) as e:
            raise InvalidAuthenticationError(f"Invalid server URL: {e}")
        except Exception as e:
            raise CredentialsValidateFailedError(f"Internal server error: {e}")

    def get_num_tokens(
        self,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        tools: list[PromptMessageTool] | None = None,
    ) -> int:
        """Approximate the token count for the given prompt messages."""
        return self._num_tokens_from_messages(prompt_messages, tools)

    def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool]) -> int:
        """Calculate num tokens for the Picc model.

        It is a generate model, so the message contents are simply joined
        with commas and counted with the GPT-2 tokenizer.
        Assumes every message content is a str — TODO confirm with callers.
        """
        # Use a new name instead of shadowing the ``messages`` parameter.
        joined_content = ",".join([message.content for message in messages])
        return self._get_num_tokens_by_gpt2(joined_content)

    def _generate(
        self,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: list[PromptMessageTool] | None = None,
        stop: list[str] | None = None,
        stream: bool = True,
        user: str | None = None,
    ) -> LLMResult | Generator:
        """Send the prompt to the Picc service and adapt the reply to Dify types.

        ``tools`` and ``stop`` are currently unused by the upstream service.

        :raises InvokeBadRequestError: when ``base_url`` has an unsupported scheme.
        """
        server_url = credentials["base_url"]
        app_id = credentials["app_id"]
        ability_id = credentials["ability_id"]
        secret_id = credentials["secret_id"]
        secret_key = credentials["secret_key"]
        model_rate_limit = credentials.get("rate_limit", ClusterRateLimit.UNLIMIT_RATE)
        # pop() so "rate_limit" is not forwarded to the service as a
        # generation parameter (replaces __contains__ + del).
        application_rate_limit = model_parameters.pop("rate_limit", ClusterRateLimit.UNLIMIT_RATE)

        # Combined per-model and per-application concurrency limiter.
        rate_limiter = MultipleRateLimit(
            ClusterRateLimit(ClusterRateLimit.RateLimitType.MODEL, model, int(model_rate_limit)),
            ClusterRateLimit(ClusterRateLimit.RateLimitType.APPLICATION, user, int(application_rate_limit))
        )
        # Convert Dify-format messages to Picc-format messages.
        messages_list = self._convert_prompt_messages_to_picc_messages(prompt_messages)

        # Pick the transport from the URL scheme.
        if server_url.startswith("http"):
            http_method = "http"
            response_method = self._recv_blocking_message_from_http
        elif server_url.startswith("ws"):
            http_method = "ws"
            response_method = self._recv_stream_message_from_socket
        else:
            # Bug fix: previously this fell through with response_method=None
            # and crashed later with an opaque TypeError.
            raise InvokeBadRequestError(f"unsupported base_url scheme: {server_url}")
        headers = get_request_headers(http_method=http_method,
                                      app_id=app_id,
                                      secret_id=secret_id,
                                      secret_key=secret_key,
                                      ability_id=ability_id
                                      )
        params = get_request_body(prompt_messages=messages_list,
                                  credentials=credentials,
                                  model_parameters=model_parameters)
        response = []
        try:
            resp = response_method(server=server_url,
                                   header=headers,
                                   params=params.to_dict(),
                                   rate_limiter=rate_limiter)
            if isinstance(resp, Message):
                # Blocking transport: a single complete message.
                response.append(resp)
            else:
                # Streaming transport: a generator of message chunks.
                response = resp
        except Exception as e:
            logging.exception("picc model error, exception: %s", str(e))
            # Surface the failure as a message rather than raising, so the
            # caller still receives a well-formed result.
            response.append(
                Message(
                    role=MessageRole.USER,
                    content="picc model error, exception: {}".format(str(e))
                )
            )

        # Convert Picc-format messages back to Dify-format results.
        if stream:
            return self._handle_chat_generate_stream_response(
                model=model, prompt_messages=prompt_messages, credentials=credentials, response=response
            )
        return self._handle_chat_generate_response(
            model=model, prompt_messages=prompt_messages, credentials=credentials, response=response
        )

    def _recv_stream_message_from_socket(self,
                                         server: str,
                                         header: dict,
                                         params: dict,
                                         stop_word: str = "",
                                         rate_limiter: MultipleRateLimit = None,
                                         ) -> Generator[Message, None, None]:
        """Stream message chunks from the Picc WebSocket endpoint.

        Yields one :class:`Message` per received chunk until the service
        signals ``finish`` or sends the ``stop_word`` close frame.
        """
        acquired_token_list = []
        try:
            acquired_token_list = rate_limiter.acquire_connection()
        except TimeoutError:
            logging.exception("application concurrency reach the limit, try again later")
            # Bug fix: this is a generator function, so the original
            # ``return self.over_rate_response`` silently discarded the
            # message (it became the StopIteration payload). Yield it instead
            # so the caller actually sees the rate-limit reply.
            yield self.over_rate_response
            return
        ws = websocket.WebSocket()
        try:
            ws.connect(url=server, header=header)
            ws.send(json.dumps(params))
            while True:
                recv_message = ws.recv()
                if recv_message == stop_word:  # close frame received
                    break
                recv_message = json.loads(recv_message)
                if recv_message["code"] != 0:
                    # Bug fix: logging.error, not logging.exception — there is
                    # no active exception here, so exception() would log a
                    # misleading "NoneType: None" traceback.
                    logging.error("error generating message, message = %s", recv_message)
                    break
                if recv_message["finish"] is True:
                    return
                yield Message(
                    role=MessageRole.ASSISTANT,
                    content=recv_message["result"]["output"]
                )
        except Exception as e:
            logging.exception("picc model error, exception: %s", str(e))
            raise
        finally:
            # Always close the socket and return the rate-limit tokens.
            if ws.connected:
                ws.close()
            rate_limiter.release_connection(acquired_token_list)

    def _recv_blocking_message_from_http(self,
                                         server: str,
                                         header: dict,
                                         params: dict,
                                         stop_word: str = "",
                                         rate_limiter: MultipleRateLimit = None,
                                         ) -> Message:
        """Fetch one complete message from the Picc HTTP endpoint.

        :raises Exception: when the service responds with a non-zero code
            or the HTTP request itself fails.
        """
        acquired_token_list = []
        try:
            acquired_token_list = rate_limiter.acquire_connection()
        except TimeoutError:
            logging.exception("application concurrency reach the limit, try again later")
            return self.over_rate_response

        try:
            resp = requests.post(server, headers=header, json=params)
        except Exception as e:
            # Bug fix: the original swallowed this exception, left resp=None,
            # and then crashed below with AttributeError on resp.text.
            logging.exception("picc model error, exception: %s", str(e))
            raise
        finally:
            rate_limiter.release_connection(acquired_token_list)
        data = json.loads(resp.text)
        if data["code"] == 0:
            return Message(
                role=MessageRole.ASSISTANT,
                content=data["result"]["output"]
            )
        # Bug fix: logging.error, not logging.exception (no active exception).
        logging.error("error generating message, message = %s", data)
        raise Exception("Received unexpected message")

    # TODO: later distinguish chat vs completion and add token accounting
    # (requires support from the model service provider).
    def _handle_chat_generate_response(
        self,
        model: str,
        prompt_messages: list[PromptMessage],
        credentials: dict,
        response: list[Message]
    ) -> LLMResult:
        """Assemble a blocking LLMResult from the collected Picc messages."""
        # Token usage is not reported by the service yet, so both counts are 0.
        usage = self._calc_response_usage(
            model=model,
            credentials=credentials,
            prompt_tokens=0,
            completion_tokens=0,
        )
        content = "".join(message.content for message in response)
        return LLMResult(
            model=model,
            prompt_messages=prompt_messages,
            message=AssistantPromptMessage(
                content=content,
                tool_calls=[],
            ),
            usage=usage
        )

    # TODO: later distinguish chat vs completion and add token accounting
    # (requires support from the model service provider).
    def _handle_chat_generate_stream_response(
        self,
        model: str,
        prompt_messages: list[PromptMessage],
        credentials: dict,
        response: Generator[Message, None, None],
    ) -> Generator[LLMResultChunk, None, None]:
        """Wrap each streamed Picc message in an LLMResultChunk."""
        for message in response:
            # Usage is not available per-chunk yet; left as None until the
            # service reports token counts.
            yield LLMResultChunk(
                model=model,
                prompt_messages=prompt_messages,
                delta=LLMResultChunkDelta(
                    index=0,
                    message=AssistantPromptMessage(content=message.content, tool_calls=[]),
                    usage=None,
                    finish_reason=None,
                ),
            )

    def _get_model_parameter_rules(self, base_model: str):
        """Return the parameter rules for ``base_model``.

        qwen2.5 has its own rule set; every other model uses the defaults.
        """
        default_parameter_rules = [
            ParameterRule(
                name="temperature",
                use_template="temperature",
                default=0.6,
                min=0.0,
                max=2.0,
                label=I18nObject(en_US="Temperature", zh_Hans="温度"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="max_tokens",
                use_template="max_tokens",
                default=10240,
                min=1,
                max=self.modelMaxToken[base_model],
                label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="top_p",
                use_template="top_p",
                default=0.95,
                min=0.1,
                max=1.0,
                label=I18nObject(en_US="Top P", zh_Hans="Top P"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="top_k",
                use_template="top_k",
                default=20,
                min=1,
                max=99,
                label=I18nObject(en_US="Top K", zh_Hans="Top K"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="repetition_penalty",
                use_template="frequency_penalty",
                default=1.0,
                label=I18nObject(en_US="Repetition penalty", zh_Hans="重复惩罚"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="rate_limit",
                use_template="rate_limit",
                default=1,
                min=1,
                max=5,
                label=I18nObject(en_US="concurrency limit", zh_Hans="并发限制"),
                type=ParameterType.INT,
            ),
        ]
        qwen_parameter_rules = [
            ParameterRule(
                name="temperature",
                use_template="temperature",
                default=0.3,
                min=0.0,
                max=1.0,
                label=I18nObject(en_US="Temperature", zh_Hans="温度"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="max_new_tokens",
                use_template="max_tokens",
                default=512,
                min=1,
                max=20000,
                label=I18nObject(en_US="Max new tokens", zh_Hans="Max new tokens"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="max_length",
                use_template="max_tokens",
                default=512,
                min=1,
                max=128000,
                label=I18nObject(en_US="Max length", zh_Hans="Max length"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="num_return_sequences",
                use_template="max_tokens",
                default=512,
                min=1,
                max=1280000,
                label=I18nObject(en_US="Num return sequences", zh_Hans="Num return sequences"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="top_p",
                use_template="top_p",
                default=0.8,
                min=0.1,
                max=1.0,
                label=I18nObject(en_US="Top P", zh_Hans="Top P"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="top_k",
                use_template="top_k",
                default=10,
                min=1,
                max=20,
                label=I18nObject(en_US="Top K", zh_Hans="Top K"),
                type=ParameterType.INT,
            ),
            ParameterRule(
                name="repetition_penalty",
                use_template="frequency_penalty",
                default=1.0,
                label=I18nObject(en_US="Repetition penalty", zh_Hans="重复惩罚"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="diversity_penalty",
                use_template="frequency_penalty",
                default=1.0,
                label=I18nObject(en_US="Diversity penalty", zh_Hans="多样性惩罚"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="length_penalty",
                use_template="frequency_penalty",
                default=1.0,
                label=I18nObject(en_US="Length penalty", zh_Hans="生成序列长度惩罚"),
                type=ParameterType.FLOAT,
            ),
            ParameterRule(
                name="rate_limit",
                use_template="rate_limit",
                default=1,
                min=1,
                max=5,
                label=I18nObject(en_US="concurrency limit", zh_Hans="并发限制"),
                type=ParameterType.INT,
            ),
        ]
        if base_model == "qwen2.5":
            return qwen_parameter_rules
        return default_parameter_rules

    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
        """
        used to define customizable model schema
        """

        # TODO: later map internal model names to underlying model names.
        base_model = model
        if "qwen" in base_model:
            base_model = "qwen2.5"
        if base_model not in self.custom_model:
            base_model = "default"

        default_model_feature = [ModelFeature.AGENT_THOUGHT,
                                 ModelFeature.TOOL_CALL,
                                 ModelFeature.MULTI_TOOL_CALL,
                                 ModelFeature.STREAM_TOOL_CALL]
        model_properties = {
            ModelPropertyKey.CONTEXT_SIZE: self.modelMaxToken[base_model],
            ModelPropertyKey.MODE: LLMMode.CHAT.value
        }

        parameter_rule = self._get_model_parameter_rules(base_model)

        return AIModelEntity(
            model=model,
            label=I18nObject(zh_Hans=model, en_US=model),
            model_type=ModelType.LLM,
            features=list(default_model_feature),
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties=model_properties,
            parameter_rules=parameter_rule
        )

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        The key is the error type thrown to the caller
        The value is the error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [],
            InvokeServerUnavailableError: [InternalServerError],
            InvokeRateLimitError: [RateLimitReachedError],
            InvokeAuthorizationError: [
                InvalidAuthenticationError,
                InsufficientAccountBalanceError,
                InvalidAPIKeyError,
            ],
            InvokeBadRequestError: [BadRequestError, KeyError],
        }

    def _convert_prompt_messages_to_picc_messages(
            self, prompt_messages: list[PromptMessage], rich_content: bool = False
    ) -> list[Message]:
        """Convert a list of Dify PromptMessages to Picc Messages.

        ``rich_content`` is accepted for interface compatibility but is
        currently unused.
        """
        messages_list = []
        for prompt_message in prompt_messages:
            converted = self._convert_prompt_message_to_picc_message(prompt_message)
            if converted:
                messages_list.extend(converted)
        return messages_list

    def _convert_prompt_message_to_picc_message(self, message: PromptMessage) -> list[Message]:
        """Convert one Dify PromptMessage to the Picc Message format.

        :raises ValueError: for unsupported message types or non-str user content.
        """
        # The redundant cast() calls after isinstance checks were removed —
        # isinstance already narrows the type.
        if isinstance(message, UserPromptMessage):
            if not isinstance(message.content, str):
                raise ValueError("User message content must be str")
            return [Message(role=MessageRole.USER, content=message.content)]
        if isinstance(message, AssistantPromptMessage):
            return [Message(role=MessageRole.ASSISTANT, content=message.content)]
        if isinstance(message, SystemPromptMessage):
            return [Message(role=MessageRole.SYSTEM, content=message.content)]
        raise ValueError(f"Unknown message type {type(message)}")