from overrides import override
from copy import deepcopy
from fastapi import Depends
import typing as T
from openai import AsyncOpenAI
from milvus_model.hybrid.mgte import MGTEEmbeddingFunction
import logging
from pydantic import BaseModel, Field

from ....common.constant.chat import ChatPrompt, MessageContentFactory, MessageType
from ..ChatService import ChatService as _ChatService
from ....init import user2chat_history, OpenAISession, configuration
from ....pojo import ChatRequestDTO, ChatPlusRequestDTO, GeneratorImageDTO
from ....pojo.dto.ChatBaseModel import ChatBaseModel
from ....common.utils.AsyncGenerateResponse import async_generate_response
from ....common.constant.chat import RoleType
from ....common.knowledge_formatter import generator
from ....init.Global import EmbeddingFunction, GenoratorImage
from ...mapper.RAGMapper import _RAGMapper, RAGMapper


class ChatServiceImpl(_ChatService):
    """Chat service backed by OpenAI completions, with optional RAG retrieval.

    A single instance is registered as a FastAPI dependency; ``__call__``
    receives the per-request collaborators and returns ``self``. Because the
    instance is shared, the injected attributes are overwritten on every
    request.
    """

    # Collaborators injected per-request via __call__; None until injected.
    embeddingFunction: T.Optional[MGTEEmbeddingFunction] = None
    openaiSession: T.Optional[AsyncOpenAI] = None
    ragMapper: T.Optional[_RAGMapper] = None

    def __call__(
        self,
        openaiSession: OpenAISession,
        embeddingFunction: EmbeddingFunction,
        ragMapper: RAGMapper,
    ):
        """FastAPI dependency hook: store injected collaborators, return self."""
        self.openaiSession = openaiSession
        self.embeddingFunction = embeddingFunction
        self.ragMapper = ragMapper
        return self

    @staticmethod
    def _history_to_messages(history: list) -> list[dict]:
        """Convert the most recent history turns into OpenAI message dicts.

        Keeps at most ``configuration.max_history + 1`` entries; each history
        entry is a ``[role, content]`` pair.
        """
        return [
            {"role": role, "content": content}
            for role, content in history[-(configuration.max_history + 1) :]
        ]

    @staticmethod
    def _append_ai_turn(history: list, text: str) -> None:
        """Record an assistant turn (a single TEXT content item) in history."""
        history.append(
            [
                RoleType.AI,
                [
                    MessageContentFactory.create(
                        type=MessageType.TEXT, text=text
                    ).model_dump()
                ],
            ]
        )

    @override
    async def chat(self, chatRequest: ChatRequestDTO):
        """Plain chat: append the user turn and complete against history."""
        history = user2chat_history.get([])
        history.append(
            [
                RoleType.USER,
                [ChatPrompt.chat.format(question=chatRequest.question)],
            ]
        )
        messages = deepcopy(ChatPrompt.messages)
        # History already contains the new user turn, so the slice covers it.
        messages.extend(self._history_to_messages(history))
        return await self._create_chat(history, messages, chatRequest)

    @override
    async def chatPlus(self, chatPlusRequest: ChatPlusRequestDTO):
        """RAG chat: retrieve knowledge-base context, then complete.

        Only *messages* (sent to the model) carries the retrieved context;
        *history* records the plain question so later turns do not replay
        stale context.
        """
        history = user2chat_history.get([])
        question = chatPlusRequest.question

        generatorParams = ChatServiceImpl.GenneratorParams(
            question=question,
            knowledgeBaseId=chatPlusRequest.knowledgeBaseId,
            exclude_image=chatPlusRequest.exclude_image,
            top_n=chatPlusRequest.top_n,
        )
        information = await self._generator(generatorParams)

        messages = deepcopy(ChatPrompt.messages)
        # Prior turns only — the current question is appended to *messages*
        # (with context) and to *history* (without it) below.
        messages.extend(self._history_to_messages(history))

        messages.append(
            {
                "role": RoleType.USER,
                "content": [
                    ChatPrompt.chatPlus.format(question=question, context=information)
                ],
            }
        )

        history.append(
            [
                RoleType.USER,
                [ChatPrompt.chat.format(question=question)],
            ]
        )

        return await self._create_chat(history, messages, chatPlusRequest)

    @override
    async def generator_image(self, generatorImageDTO: GeneratorImageDTO):
        """Create an image-generation task and wait for its completion.

        Returns a ``(status, payload)`` tuple: ``True`` plus the image URLs
        on success, otherwise a status string plus an optional message.
        """
        result = await GenoratorImage.create_task_and_wait(
            generatorImageDTO.model_dump(exclude_none=True)
        )
        if result.type == "create_error":
            return "创建任务失败", result.message
        if result.type == "timeout":
            return "任务超时", None
        if result.type == "failed":
            return "任务失败", result.message
        if result.type == "succeeded":
            # Keep only entries that actually carry a URL.
            urls = [url for entity in result.results if (url := entity.get("url"))]
            return True, urls
        # Fix: an unrecognized result type previously fell through and
        # returned None, which breaks tuple-unpacking callers.
        return "任务失败", getattr(result, "message", None)

    async def _create_chat(
        self, history: list, messages: list[dict], createParams: ChatBaseModel
    ):
        """Issue the completion request and record the assistant turn.

        Streaming requests return an async response generator that fills in
        the (initially empty) assistant history turn; non-streaming requests
        return the completed content string.
        """
        choice = await self.openaiSession.chat.completions.create(
            model=createParams.model,
            messages=messages,
            stream=createParams.stream,
            temperature=createParams.temperature,
            frequency_penalty=createParams.frequency_penalty,
            presence_penalty=createParams.presence_penalty,
            max_completion_tokens=createParams.max_completion_tokens,
            top_p=createParams.top_p,
            modalities=createParams.modalities,
        )

        if createParams.stream:
            # Placeholder turn; the streaming generator appends chunks to it.
            self._append_ai_turn(history, "")
            return async_generate_response(history, choice)

        content = choice.choices[0].message.content
        self._append_ai_turn(history, content)
        return content

    class GenneratorParams(BaseModel):
        """Parameters for knowledge retrieval (see ``_generator``)."""

        question: str = Field(description="问题")
        knowledgeBaseId: int = Field(description="知识库id")
        top_n: T.Optional[int] = Field(default=1, description="返回知识数量")
        collection_name: T.Optional[str] = Field(
            default="default", description="数据库名称"
        )
        exclude_image: T.Optional[bool] = Field(
            default=False, description="是否排除图片"
        )

    # Correctly spelled alias; the misspelled name is kept for compatibility.
    GeneratorParams = GenneratorParams

    async def _generator(self, params: GenneratorParams):
        """Retrieve and format knowledge-base context for *params.question*.

        Encodes the question, searches the RAG store, groups hit contents by
        their metadata "type", and renders them with the knowledge formatter.
        """
        query_embedding = (
            self.embeddingFunction.encode_queries([params.question])
            .get("dense")[0]
            .detach()
            .cpu()
            .numpy()
        )

        hits = await self.ragMapper.searchByQueryAndKBId(
            # NOTE: "query_embeddiing" (sic) matches the mapper's parameter name.
            query_embeddiing=query_embedding,
            knowledgeBaseId=params.knowledgeBaseId,
            top_n=params.top_n,
            collection_name=params.collection_name,
            exclude_image=params.exclude_image,
        )

        # Group hit contents by their metadata "type" field.
        grouped: dict = {}
        for hit in hits:
            entity = hit.get("entity")
            entity_type = entity.get("metadata").get("type")
            grouped.setdefault(entity_type, []).append(entity.get("content"))

        information = generator(grouped)
        logging.info("\n\n问题:\n\n%s\n\n检索结果:\n\n%s", params.question, information)
        return information


# FastAPI dependency alias: a single shared ChatServiceImpl instance whose
# __call__ receives the per-request collaborators via Depends() injection.
ChatService = T.Annotated[ChatServiceImpl, Depends(ChatServiceImpl())]
