"""
title: GF OPEN WEBUI 0.6 Pipe_AI_Agent_Python_PPTX.py
author: Gou Feng
author_url: https://github.com/goufeng928
funding_url: https://github.com/goufeng928
version: 0.1
"""

from pydantic import BaseModel, Field
from typing import Optional
# ..................................................
# Custom Import Package
import os
import pandas
import requests

# ##################################################

class Filter:
    """Open WebUI filter plugin.

    ``inlet`` pre-processes the request body before it reaches the chat
    completion API: if the last user message asks to generate a PPT/PPTX it
    forwards the prompt to an internal python-pptx agent service; otherwise it
    rewrites the last user message with a reply obtained from an
    OpenAI-compatible endpoint.  ``outlet`` is a pass-through post-processing
    hook.  Both hooks must return the (possibly modified) body dict.
    """

    class Valves(BaseModel):
        # Admin-level configuration exposed in the WebUI.
        priority: int = Field(
            default=0, description="Priority level for the filter operations."
        )
        max_turns: int = Field(
            default=8, description="Maximum allowable conversation turns for a user."
        )         # 8 => Default Value

    class UserValves(BaseModel):
        # Per-user configuration exposed in the WebUI.
        max_turns: int = Field(
            default=4, description="Maximum allowable conversation turns for a user."
        )         # 4 => Default Value

    def __init__(self):
        # When True, inlet/outlet print their inputs to stdout for debugging.
        self.debug: bool = False

        # Credentials / endpoint for the OpenAI-compatible chat-completions API.
        self.OpenAI_20250505_API_V1_Key: str = (
            ".........................................."
            # Memo: ".........................................."
        )
        self.OpenAI_20250505_API_V1_URL: str = "https://spark-api-open.xf-yun.com/v1/chat/completions"
        self.OpenAI_20250505_API_V1_Model: str = "4.0Ultra"

        # Base URL of the internal helper service (hosts the PPTX agent).
        self.Internals_API_5000_URL: str = (
            "http://host.docker.internal:5000"
            # Memo: "http://192.168.1.1:5000"
            # Memo: "http://host.docker.internal:5000"
        )

        # Snapshots of the most recent inlet payload.
        self.inlet_messages: list       = []
        self.inlet_metadata: dict       = {}
        self.inlet_metadata_files: list = []  # body: {"metadata": {..., "files": None / [...], ...}

        # Indicates custom file handling logic. This flag helps disengage default routines in favor of custom
        # implementations, informing the WebUI to defer file-related operations to designated methods within this class.
        # Alternatively, you can remove the files directly from the body in the inlet hook.
        # self.file_handler = True

        # Initialize 'valves' with specific configurations. Using a 'Valves' instance helps encapsulate settings,
        # which ensures settings are managed cohesively and not confused with operational flags like 'file_handler'.
        self.valves = self.Valves()

    def Requests_POST_Ollama_0_6_API_Chat(self, URL: str, Model: str, Message_List: list) -> dict:
        """Call an Ollama chat endpoint and return the decoded JSON response.

        Message_List should look like:
        [
            {"role":      "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hello, I am an AI assistant!"},
            {"role":      "user", "content": "What time is it?"},
            ......
        ]
        URL is similar to: http://127.0.0.1:11434/api/chat
        """
        Rqs_JSON = {"model": Model, "messages": Message_List, "stream": False}
        # FIX: always pass a timeout so a dead endpoint cannot hang the filter.
        Response = requests.post(url=URL, json=Rqs_JSON, timeout=300)
        Rsp_JSON = Response.json()
        # To read the model reply from the returned dict:
        # - Rsp_Message = Rsp_JSON.get("message", {})
        # - Rsp_Content = Rsp_Message["content"]
        return Rsp_JSON

    def Requests_POST_OpenAI_20250505_API_V1_Chat_Completions(self, URL: str, Key: str, Model: str, Message_List: list) -> dict:
        """Call an OpenAI-compatible chat-completions endpoint and return the decoded JSON.

        Message_List should look like:
        [
            {"role":    "system", "content": "Role setting: You are a knowledgeable assistant"},
            {"role":      "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hello, I am an AI assistant!"},
            {"role":      "user", "content": "What time is it?"},
            ......
        ]
        URL is similar to: http://127.0.0.1:3001/v1/chat/completions
        """
        Rqs_Headers = {"Authorization": "Bearer %s" % Key, "Content-Type": "application/json"}
        Rqs_JSON    = {"model": Model, "messages": Message_List, "stream": False, "temperature": 0.7}
        # FIX: always pass a timeout so a dead endpoint cannot hang the filter.
        Response = requests.post(url=URL, headers=Rqs_Headers, json=Rqs_JSON, timeout=300)
        Rsp_JSON = Response.json()
        # To read the model reply from the returned dict:
        # - Rsp_Choices = Rsp_JSON.get("choices", [])
        # - Rsp_Message = Rsp_Choices[0]["message"]
        # - Rsp_Content = Rsp_Message["content"]
        return Rsp_JSON

    def inlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
        """Validate/modify the request body before the chat completion API runs.

        Returns the (possibly modified) body dict; raises if the conversation
        exceeds the configured turn limit.
        """
        # FIX: original code referenced the undefined name ``slef``.
        if self.debug:
            print(f"inlet:{__name__}")
            print(f"inlet:body:{body}")
            print(f"inlet:user:{__user__}")

        self.inlet_messages       = body.get("messages", [])
        self.inlet_metadata       = body.get("metadata", {})
        # FIX: use .get() — "files" may be absent from metadata (KeyError before).
        self.inlet_metadata_files = self.inlet_metadata.get("files")
        # ..........................................
        files_count: int = 0
        if self.inlet_metadata_files is not None:
            files_count = len(self.inlet_metadata_files)
        # ..........................................
        print("[Func 调试] files count:", files_count)  # -> Debug by GF
        # ..........................................
        if not self.inlet_messages:
            # FIX: guard the [-1] access — an empty message list crashed before.
            return body

        user_message = self.inlet_messages[-1]["content"]
        if ("生成" in user_message or "制作" in user_message or "模板" in user_message) and \
           ("ppt"  in user_message or "pptx" in user_message or "PPT"  in user_message or "PPTX" in user_message):

            try:
                Headers   = {"Content-Type": "application/json"}
                JSON_Data = {"messages": [{"role": "user", "content": user_message}]}
                # FIX: use the configured base URL instead of a hard-coded address.
                Response  = requests.post(
                    self.Internals_API_5000_URL + "/api/ai-agent-python-pptx",
                    headers=Headers, json=JSON_Data, timeout=300,
                )
                Rsp_Ctnt  = Response.json()["content"]
                # ..................................
                # FIX: an inlet hook must return a dict.  The original ``yield``
                # turned this method into a generator (Pipe-style streaming does
                # not apply here); inject the agent's answer into the last
                # message instead.
                body["messages"][-1] = {"role": "user", "content": Rsp_Ctnt}
            except Exception as e:
                # Best-effort: log the failure and let the original body pass
                # through unchanged (returning str(e) from inlet is invalid).
                print(e)

        else:

            try:
                # FIX: the original referenced the undefined name ``messages``
                # and its ``try`` block had no except clause (a SyntaxError).
                Message_List: list = self.inlet_messages
                # ..................................
                # Answer the user with "Ollama" (kept for reference):
#               Rsp_JSON = self.Requests_POST_Ollama_0_6_API_Chat(
#                   "http://192.168.1.1:11434/api/chat", "deepseek-r1:1.5b", Message_List
#               )
#               Rsp_Ctnt = Rsp_JSON.get("message", {})["content"]
                # ..................................
                # Answer the user with the internal OpenAI-compatible endpoint.
                Rsp_JSON = self.Requests_POST_OpenAI_20250505_API_V1_Chat_Completions(
                    self.OpenAI_20250505_API_V1_URL,
                    self.OpenAI_20250505_API_V1_Key,
                    self.OpenAI_20250505_API_V1_Model, Message_List)
                # FIX: default must be a list (was {}), else [0] raised KeyError.
                Rsp_Ctnt = Rsp_JSON.get("choices", [])[0]["message"]["content"]
                # ..................................
                body["messages"][-1] = {"role": "user", "content": Rsp_Ctnt}
            except Exception as e:
                # Best-effort: log the failure and pass the body through as-is.
                print(e)

        # FIX: __user__ defaults to None — guard before subscripting it.
        if __user__ is not None and __user__.get("role", "admin") in ["user", "admin"]:
            messages = body.get("messages", [])

            # FIX: "valves" may be missing from __user__; fall back to defaults.
            user_valves = __user__.get("valves") or self.UserValves()
            max_turns = min(user_valves.max_turns, self.valves.max_turns)
            if len(messages) > max_turns:
                raise Exception(
                    f"Conversation turn limit exceeded. Max turns: {max_turns}"
                )

        return body

    def outlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
        """Analyze/modify the response body after the API has processed it.

        Currently a pass-through that only prints debug information.
        """
        # FIX: original code referenced the undefined name ``slef``.
        if self.debug:
            print(f"outlet:{__name__}")
            print(f"outlet:body:{body}")
            print(f"outlet:user:{__user__}")

        return body

# Signed by GF.
