import asyncio
import json

from openai import OpenAI
from openai import AsyncOpenAI
from openai.lib.streaming._assistants import AsyncAssistantEventHandler
from openai.types.beta import VectorStore
from openai.types.beta.assistant import Assistant
from openai.types.beta.thread import Thread
from openai.types.beta.threads.run import Run
from typing_extensions import override

from HomeAutoAI.common.Results import WebSocketResult
from llm.entity.dto.DeviceConfigAddDTO import DeviceConfigAddDTO
from llm.entity.metadatas import VectorStoreMetadata, MessageMetadata
from llm.service.DeviceService import DeviceService
from llm.prompt.OneNetPrompts import OneNetPrompt, OneNetParserPrompt

from llm.models import Assistants, Threads
from llm.entity.dao.AssistantsDAO import AssistantsDAO
from llm.entity.dao.VectorStoreDAO import VectorStoreDAO
from users.models import Users

# Module-level OpenAI client; used by OpenAIVectorStoreService below.
# NOTE(review): the other service classes construct a fresh client per call —
# consider unifying on one approach.
client = OpenAI()


# from llm.service.DeviceService import DeviceService as Device
class FunctionEventHandler(AsyncAssistantEventHandler):
    """Assistant event handler that executes local function calls.

    Streamed assistant text is echoed to stdout and accumulated in
    ``self.results`` (see :meth:`get_results`); ``oneNET_device_control``
    tool calls are dispatched to :class:`DeviceService`.
    """

    def __init__(self):
        super().__init__()
        self.results = ""  # accumulated assistant text (a string, not a list)
        self.client = OpenAI()

    @override
    async def on_event(self, event):
        """Route run events; only ``thread.run.requires_action`` is handled."""
        if event.event != 'thread.run.requires_action':
            return
        await self.handle_requires_action(event.data, event.data.id)

    async def handle_requires_action(self, data, run_id):
        """Execute the tool calls requested by the current run.

        :param data: payload of the ``requires_action`` event
        :param run_id: id of the current run

        NOTE(review): ``tool_outputs`` is collected but never submitted back
        to the run (the submit code is absent), so the run will remain in
        ``requires_action`` — confirm whether submission happens elsewhere.
        """
        print("开始处理助理事件")
        tool_outputs = []
        for tool in data.required_action.submit_tool_outputs.tool_calls:
            if tool.function.name != "oneNET_device_control":
                print(f'eventhandler内部：未知函数{tool.function.name}')
                continue
            # Decode the function-call arguments produced by the model.
            arguments = json.loads(tool.function.arguments)
            product_id = arguments.get('product_id')
            device_name = arguments.get('device_name')
            params = arguments.get('params')
            print(json.dumps(params))
            print(f'eventhandler内部：获取参数{product_id}，{device_name}，{params}')
            result = DeviceService.oneNET_device_control(
                product_id=product_id, device_name=device_name, params=params)
            print(result)
            tool_outputs.append({"tool_call_id": tool.id, "output": str(result)})

    # TODO: code_interpreter support — currently an unused placeholder.
    async def on_tool_call_created(self, tool_call):
        print(f"\n助理工具 > {tool_call.type}")
        if tool_call.type == "function":
            print(f'助理函数 > {tool_call.function}')

    async def on_tool_call_delta(self, delta, snapshot):
        if delta.type != 'code_interpreter':
            return
        interpreter = delta.code_interpreter
        if interpreter.input:
            print(interpreter.input, end="", flush=True)
        if interpreter.outputs:
            print(f"\n\n输出 >", flush=True)
            for output in interpreter.outputs:
                if output.type == "logs":
                    print(f"\n{output.logs}", flush=True)

    @override
    async def on_text_delta(self, delta, snapshot):
        """Echo a streamed text fragment and accumulate it for later retrieval."""
        print(delta.value, end="", flush=True)
        self.results += delta.value

    def get_results(self):
        """Return all assistant text received so far."""
        return self.results


class ConsumerFunctionEventHandler(AsyncAssistantEventHandler):
    """Assistant event handler that executes local function calls and streams
    all output to a WebSocket consumer.

    :param consumer: object exposing an async ``send(text_data=...)`` method
        (e.g. a Django Channels consumer) used for streaming output.
    """

    def __init__(self, consumer):
        super().__init__()
        self.results = []  # collected results (currently unused by callbacks)
        self.client = OpenAI()
        self.consumer = consumer  # WebSocket send target

    @override
    async def on_event(self, event):
        """Route run events; only ``thread.run.requires_action`` is handled.

        Bug fix: the coroutine returned by ``handle_requires_action`` was
        previously not awaited, so requested tool calls never executed.
        """
        if event.event == 'thread.run.requires_action':
            run_id = event.data.id
            await self.handle_requires_action(event.data, run_id)

    async def handle_requires_action(self, data, run_id):
        """Execute the tool calls requested by the current run.

        :param data: payload of the ``requires_action`` event
        :param run_id: id of the current run

        NOTE(review): ``tool_outputs`` is collected but never submitted back
        to the run — confirm whether submission is handled elsewhere.
        """
        tool_outputs = []
        for tool in data.required_action.submit_tool_outputs.tool_calls:
            if tool.function.name == "oneNET_device_control":
                # Decode the function-call arguments produced by the model.
                arguments = json.loads(tool.function.arguments)
                product_id = arguments.get('product_id')
                device_name = arguments.get('device_name')
                params = arguments.get('params')
                result = DeviceService.oneNET_device_control(product_id=product_id, device_name=device_name,
                                                             params=params)
                tool_outputs.append({"tool_call_id": tool.id, "output": str(result)})
            else:
                # Consistent with FunctionEventHandler: surface unknown tools.
                print(f'eventhandler内部：未知函数{tool.function.name}')

    # TODO: code_interpreter support — currently an unused placeholder.
    async def on_tool_call_created(self, tool_call):
        print(f"\n助理工具 > {tool_call.type}")
        # ``await coro`` is equivalent to (and simpler than)
        # ``await asyncio.create_task(coro)`` when awaited immediately.
        await self.consumer.send(text_data=json.dumps({
            'tool': tool_call.type
        }))

    async def on_tool_call_delta(self, delta, snapshot):
        if delta.type == 'code_interpreter':
            if delta.code_interpreter.input:
                print(delta.code_interpreter.input, end="", flush=True)
                await self.consumer.send(text_data=json.dumps({
                    'code_input': delta.code_interpreter.input
                }))
            if delta.code_interpreter.outputs:
                print(f"\n\n输出 >", flush=True)
                for output in delta.code_interpreter.outputs:
                    if output.type == "logs":
                        print(f"\n{output.logs}", flush=True)
                        await self.consumer.send(text_data=json.dumps({
                            'code_output': output.logs
                        }))

    @override
    async def on_text_delta(self, delta, snapshot):
        """Stream a text fragment to the WebSocket.

        Sent as plaintext JSON — nothing here needs encryption.
        """
        print(delta.value, end="", flush=True)
        await self.consumer.send(text_data=json.dumps({
            'message': delta.value
        }))


class WebSocketOutputHandler(AsyncAssistantEventHandler):
    """Event handler that forwards streamed assistant output to a WebSocket.

    Text deltas are accumulated locally (see :meth:`get_results`) and pushed
    to the consumer wrapped in a ``WebSocketResult`` envelope.
    """

    def __init__(self, consumer):
        super().__init__()
        self.results: str = ""  # accumulated assistant text
        self.client = OpenAI()
        self.consumer = consumer  # exposes the WebSocket ``send`` coroutine

    # TODO: code_interpreter support — currently an unused placeholder.
    async def on_tool_call_created(self, tool_call):
        print(f"\n助理工具 > {tool_call.type}")
        payload = json.dumps({'tool': tool_call.type})
        await asyncio.create_task(self.consumer.send(text_data=payload))

    async def on_tool_call_delta(self, delta, snapshot):
        if delta.type != 'code_interpreter':
            return
        interpreter = delta.code_interpreter
        if interpreter.input:
            print(interpreter.input, end="", flush=True)
            await asyncio.create_task(self.consumer.send(
                text_data=json.dumps({'code_input': interpreter.input})))
        if interpreter.outputs:
            print(f"\n\n输出 >", flush=True)
            for output in interpreter.outputs:
                if output.type == "logs":
                    print(f"\n{output.logs}", flush=True)
                    await asyncio.create_task(self.consumer.send(
                        text_data=json.dumps({'code_output': output.logs})))

    @override
    async def on_text_delta(self, delta, snapshot):
        """Accumulate a streamed text fragment and forward it to the client.

        Sent as plaintext — nothing here needs encryption.
        """
        print(delta.value, end="", flush=True)
        self.results += delta.value
        await asyncio.create_task(
            self.consumer.send(WebSocketResult.success(data=delta.value)))

    def get_results(self):
        """Return all assistant text received so far."""
        return self.results


class OpenAIAssistantService:
    """Creation and maintenance of OpenAI Assistants bound to application users."""

    @classmethod
    def creat_for_func(cls, user_id: str, assistant_name: str) -> Assistant:
        """Create the device-control ("执行") assistant for a user, bind it to
        ``user_id`` and persist the binding in the ``Assistants`` table.

        NOTE(review): the method name keeps the historical "creat" typo
        because callers depend on it.

        :param user_id: id of the owning user
        :param assistant_name: display name for the assistant
        :return: the created Assistant
        """
        client = OpenAI()
        assistant = client.beta.assistants.create(
            model="gpt-4o-mini",
            name=assistant_name,
            instructions=OneNetPrompt,
            metadata={
                "user_id": user_id,
                "assistant_type": "智能家居助手-执行",
                "usage": "oneNET平台设备控制"
            },
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "oneNET_device_control",
                        "description": "用于和嵌入式组目前使用的OneNET平台进行通信，实现设备控制",
                        # Bug fix: strict mode is now off. With strict=True,
                        # an object that declares no properties and sets
                        # additionalProperties=False can only ever be the
                        # empty object, so the model could never place any
                        # control parameters in "params".
                        "strict": False,
                        "parameters": {
                            "type": "object",
                            "required": [
                                "product_id",
                                "device_name",
                                "params"
                            ],
                            "properties": {
                                "product_id": {
                                    "type": "string",
                                    "description": "产品ID"
                                },
                                "device_name": {
                                    "type": "string",
                                    "description": "设备名称"
                                },
                                "params": {
                                    # Free-form dict of control parameters.
                                    "type": "object",
                                    "description": "控制参数，字典格式"
                                }
                            },
                            "additionalProperties": False
                        }
                    }
                },
            ]
        )

        # Persist the assistant/user binding.
        Assistants.objects.create(
            assistant_name=assistant.name,
            assistant_id=assistant.id,
            user_id_id=user_id
        )
        return assistant

    @classmethod
    def create_for_parser(cls, user: Users, assistant_name: str) -> Assistant:
        """Create the command-parsing ("解析") assistant for *user*, enable
        file_search, and persist it through :class:`AssistantsDAO`.

        :param user: owning user record
        :param assistant_name: display name for the assistant
        :return: the created Assistant
        """
        client = OpenAI()

        assistant: Assistant = client.beta.assistants.create(
            model="gpt-4o-mini",
            name=assistant_name,
            instructions=OneNetParserPrompt,
            metadata={
                "user_id": user.user_id,
                "assistant_type": "智能家居助手-解析",
                "usage": "指令解析"
            },
            tools=[{"type": "file_search"}]
        )
        AssistantsDAO.add(assistant, user=user)
        return assistant

    # TODO: flesh out `create` — it should accept configurable
    # function-calling tools and file_search resources instead of the
    # hard-coded ones used by creat_for_func/create_for_parser.
    @classmethod
    def create(cls, user_id: str, assistant_name: str, prompt: str) -> Assistant:
        """Create an assistant with an arbitrary prompt, bound to *user_id*.

        Not implemented yet — currently a no-op stub returning ``None``.

        :param user_id: id of the owning user
        :param assistant_name: display name for the assistant
        :param prompt: instruction prompt for the assistant
        """

    @classmethod
    def update(cls, assistant_id: str) -> Assistant:
        """Attach function-calling tools to an existing assistant.

        Bug fix: the previous body called ``client.beta.assistants.update``
        without the required ``assistant_id`` argument (and with an empty
        placeholder tool), so it always raised ``TypeError``. Raising
        ``NotImplementedError`` makes the unfinished state explicit instead
        of failing with a misleading error.

        :param assistant_id: id of the assistant to update
        :raises NotImplementedError: always, until implemented
        """
        raise NotImplementedError(
            "OpenAIAssistantService.update is not implemented yet"
        )


class OpenAIThreadService:
    """Thread (conversation) lifecycle management."""

    @classmethod
    def create(cls, user_id: str) -> Thread:
        """Create a new thread bound to *user_id* and persist the binding.

        :param user_id: id of the owning user
        :return: the created Thread
        """
        metadata = {
            "thread_name": "智慧家庭-执行窗口",
            "user_id": user_id,
        }
        thread = OpenAI().beta.threads.create(metadata=metadata)
        # Persist the thread/user binding.
        Threads.objects.create(thread_id=thread.id, user_id_id=user_id)
        return thread


class OpenAIRunService:
    """Runs assistants against threads and post-processes their output."""

    @classmethod
    async def arun_with_func_consumer(cls, message: str, user_id: str, assistant_id: str, thread_id: str,
                                      consumer):
        """Post *message* to the thread and stream the run through a
        ``ConsumerFunctionEventHandler`` (local function calls + WebSocket
        output).

        :param message: user message text
        :param user_id: id of the requesting user (stored in message metadata)
        :param assistant_id: assistant to run
        :param thread_id: thread to post to
        :param consumer: WebSocket consumer receiving streamed output
        """
        client = AsyncOpenAI()
        await client.beta.threads.messages.create(
            thread_id=thread_id,
            content=message,
            metadata={
                'user_id': user_id,
                'target_assistant_id': assistant_id,
                'thread_id': thread_id
            },
            role='user'
        )
        async with client.beta.threads.runs.stream(
                thread_id=thread_id,
                assistant_id=assistant_id,
                event_handler=ConsumerFunctionEventHandler(consumer=consumer)
        ) as stream:
            await stream.until_done()

    @classmethod
    async def arun_with_func(cls, message: str, user_id: str, assistant_id: str, thread_id: str) -> Run:
        """Post *message* to the thread, run the assistant with local
        function-calling, and return the final Run.

        :param message: user message text
        :param user_id: id of the requesting user (stored in message metadata)
        :param assistant_id: assistant to run
        :param thread_id: thread to post to
        :return: the final Run object of the stream
        """
        client = AsyncOpenAI()
        await client.beta.threads.messages.create(
            thread_id=thread_id,
            content=message,
            metadata={
                'user_id': user_id,
                'target_assistant_id': assistant_id,
                'thread_id': thread_id
            },
            role='user'
        )
        event_handler = FunctionEventHandler()
        async with client.beta.threads.runs.stream(
                thread_id=thread_id,
                assistant_id=assistant_id,
                event_handler=event_handler
        ) as stream:
            await stream.until_done()
        return await stream.get_final_run()

    @staticmethod
    def _extract_json(text: str) -> str:
        """Extract the JSON payload delimited by a ``\u0060json`` marker and the
        next backtick (this also matches a standard ```json fenced block).

        :param text: full assistant reply text
        :return: the stripped JSON string, or "" when no payload is found
        """
        start_marker = "`json"
        end_marker = "`"
        start_index = text.find(start_marker)
        if start_index == -1:
            print(f"未找到JSON数据")
            return ""
        start_index += len(start_marker)
        end_index = text.find(end_marker, start_index)
        if end_index == -1:
            print(f"未找到JSON数据")
            return ""
        parsed_message = text[start_index:end_index].strip()
        print(f"解析出来的字符串：{parsed_message}")
        return parsed_message

    @classmethod
    async def aparse_command(cls, message: str, assistant_id: str, thread_id: str) -> str:
        """Parse a natural-language command into its JSON representation.

        :param message: user command text
        :param assistant_id: parser assistant to run
        :param thread_id: thread to post the command to
        :return: the extracted JSON string, or "" when nothing was produced.
            (Bug fix: previously returned ``None`` — despite the ``-> str``
            annotation — when the assistant produced no text at all.)
        """
        client = AsyncOpenAI()
        # Avoid shadowing the `message` parameter with the API response.
        message_obj = await client.beta.threads.messages.create(
            thread_id=thread_id,
            content=message,
            role='user',
            metadata=MessageMetadata(
                type='2'
            )
        )
        print(f'发出指令：{message_obj.metadata}')
        stream_run = await client.beta.threads.runs.create(
            thread_id=thread_id,
            assistant_id=assistant_id,
            stream=True,
        )

        latest_assistant_message: str = ''
        async for event in stream_run:
            # Events that carry a 'status' attribute are lifecycle updates;
            # only delta events contribute text.
            if hasattr(event.data, 'status'):
                continue
            delta = getattr(event.data, 'delta', None)
            if delta and getattr(delta, 'content', None):
                for block in delta.content:
                    latest_assistant_message += block.text.value

        if not latest_assistant_message:
            return ""  # bug fix: previously fell through and returned None
        print(f"最新的回复：{latest_assistant_message}")
        # Hand the JSON payload back to the caller for execution.
        return cls._extract_json(latest_assistant_message)

    @classmethod
    async def cancel_run(cls, run_id, thread_id):
        """Cancel the given run on the given thread.

        :param run_id: id of the run to cancel
        :param thread_id: thread the run belongs to
        :return: the cancelled Run as reported by the API
        """
        client = AsyncOpenAI()
        cancelled_run = await client.beta.threads.runs.cancel(
            run_id=run_id,
            thread_id=thread_id
        )
        print(f"cancel_run:取消运行{run_id}成功")
        return cancelled_run


class OpenAIVectorStoreService:
    """Vector-store management for file_search-enabled assistants."""

    @classmethod
    def create(cls, vector_name: str, assistant_id: str, user: Users,
               metadata: VectorStoreMetadata, file_ids: list[str] = None) -> VectorStore:
        """Create a vector store (optionally seeded with *file_ids*), attach it
        to the assistant's file_search tool, and persist it for *user*.

        TODO: the metadata schema still needs to be finalized.

        :param vector_name: display name for the vector store
        :param assistant_id: assistant whose file_search tool gets the store
        :param user: owning user record
        :param metadata: metadata object serialized via ``__dict__``
        :param file_ids: optional files to seed the store with
        :return: the created VectorStore
        """
        create_kwargs = {
            "name": vector_name,
            "metadata": metadata.__dict__,
        }
        if file_ids:
            # Seed the store with the given files; otherwise create it empty.
            create_kwargs["file_ids"] = file_ids
        vector_store = client.beta.vector_stores.create(**create_kwargs)

        # Point the assistant's file_search tool at the new store.
        client.beta.assistants.update(
            assistant_id=assistant_id,
            tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
        )

        VectorStoreDAO.add(vector_store, user=user)
        return vector_store

    @classmethod
    def attach_file(cls, deviceConfigAddDTO: DeviceConfigAddDTO, user: Users):
        """Add a file to a vector store (not implemented yet).

        Intended flow: look up the current user's whole-home assistant id and
        its default vector store id, then update that vector store.

        :param deviceConfigAddDTO: device-config payload describing the file
        :param user: owning user record
        """


class OPenAIFIleService:
    """File upload/management service (stub — not implemented yet).

    NOTE(review): class name carries typos ("OPenAIFIle") — kept as-is
    because external callers may reference it by this name.
    """
    @classmethod
    def add(cls):
        # TODO: implement file upload.
        pass