# -*- coding: utf-8 -*-
import json
import os
from typing import Dict
import xml.etree.ElementTree as ET
import operator
from typing import Annotated, Any
from typing_extensions import TypedDict
from string import Template
from langgraph.graph import StateGraph, START, END
from ApiBase import apiBase
from ApiModels import apiModels
import re
# rag
from langchain_openai import ChatOpenAI
# pip install "langchain-unstructured[local]"

# agent
from langchain.pydantic_v1 import BaseModel, Field
from langchain.agents import AgentType, initialize_agent
from langchain.chains import LLMMathChain
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import Tool, tool
from langchain_core.prompts import ChatPromptTemplate
from langchain.agents import AgentExecutor, create_react_agent
from langchain import hub
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_core.tools import StructuredTool

from ApiVector import LlmDbVector
from ApiTol import ApiTol
from ApiAct import ApiAct
from ApiSft import ApiSft
from ApiGrp import ApiGrp
from ApiRft import ApiRft

class PrimeInput(BaseModel):
    # Single string argument schema shared by every generated Tool in this
    # module (hpl tools, api tools, python tools). Default "1" is a benign
    # placeholder so agents may call a tool with no explicit argument.
    param: str = Field(default="1")

class LangGraphState(TypedDict):
    """Shared state that flows through the compiled LangGraph graph."""
    # The operator.add reducer fn makes this append-only
    aggregate: Annotated[list, operator.add]
    # Name of the node most recently executed (written by LangGraphNode).
    curNodeName: str    


class LangGraphNode:
    """Callable LangGraph node that records its own name into the state.

    Each instance wraps one tool file discovered under *dir*; invoking it
    appends its name to ``state['aggregate']`` (via the operator.add reducer)
    and reports itself as the current node.
    """

    def __init__(self, dir: str, name: str):
        self.dir = dir    # directory the node definition was loaded from
        self.name = name  # node name used in the graph and in the state

    def __call__(self, state: "LangGraphState") -> Any:
        print(f"Adding {self.name} to {state['aggregate']}")
        # Return the update instead of mutating the incoming mapping:
        # LangGraph merges the *returned* partial update through the state's
        # reducers; in-place writes to the passed-in state (as the original
        # did for curNodeName) are not reliably persisted.
        return {"aggregate": [self.name], "curNodeName": self.name}
    

class ApiTools:
    """Singleton facade that wires the LLM, the vector store and the Api*
    helper clients together, and loads tool files (.hpl / .py) into
    LangChain agents or LangGraph graphs.
    """
    # Shared process-wide instance (see __new__).
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Do NOT forward *args/**kwargs to object.__new__: it rejects
            # extra arguments and would raise TypeError.
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # NOTE(review): __init__ re-runs on every ApiTools() call even though
        # __new__ returns the cached instance; state is simply rebuilt.
        print("ApiTools init")
        # Base directory of this module; caches, docs and tools live below it.
        self.dir = os.path.dirname(os.path.abspath(__file__))
        self.model_name = apiBase.prop_read("OPENAI_MODEL")
        # Point the HuggingFace caches below this module's directory.
        apiBase.prop_write('HF_HOME', self.dir + "/huggingface/")
        apiBase.prop_write('TRANSFORMERS_CACHE', self.dir + "/huggingface/hub")

        # Chroma vector store location and collection name.
        self.cn_vecname = 'apitools'
        self.cn_vecdir = self.dir + apiBase.prop_read("VEC_DIR")

        self.llm = ChatOpenAI(model_name=self.model_name)

        # Paths used by the agent that chains the different APIs together.
        self.doc_path = self.dir + "/docs/"
        self.tools_path = self.dir + "/tools/"
        # Lazily created clients (see the load_* accessors below).
        self.vect_client = None
        self.grp_client = None
        self.tol_client = None
        self.rft_client = None
        self.sft_client = None
        self.act_client = None

    def load_models(self):
        """Return the shared apiModels registry."""
        return apiModels

    # Lazy accessor: vector store client.
    def load_vec(self) -> LlmDbVector:
        if self.vect_client is None:
            self.vect_client = LlmDbVector(llm=self.llm)
        return self.vect_client

    # Lazy accessor: group client.
    def load_grp(self) -> ApiGrp:
        if self.grp_client is None:
            self.grp_client = ApiGrp(self)
        return self.grp_client

    # Lazy accessor: tools client.
    def load_tol(self) -> ApiTol:
        if self.tol_client is None:
            self.tol_client = ApiTol(llm=self.llm, vector=self.load_vec())
        return self.tol_client

    # Lazy accessor: evaluation / reflection client.
    def load_rft(self) -> ApiRft:
        if self.rft_client is None:
            self.rft_client = ApiRft(model_name=self.model_name, llm=self.llm,
                                     vector=self.load_vec())
        return self.rft_client

    # Lazy accessor: safety client.
    def load_sft(self) -> ApiSft:
        if self.sft_client is None:
            self.sft_client = ApiSft(llm=self.llm, vector=self.load_vec())
        return self.sft_client

    # Lazy accessor: action client.
    def load_act(self) -> ApiAct:
        if self.act_client is None:
            self.act_client = ApiAct(llm=self.llm, vector=self.load_vec())
        return self.act_client

    def cut_msg(self, msg, startchar, endchar):
        """Return the slice of *msg* from the first matching start marker up
        to (excluding) the last matching end marker, or None when either
        marker is missing or they are out of order."""
        start_index = -1
        for word in startchar:
            start_index = msg.find(word)
            if start_index != -1:
                break
        if start_index == -1:
            return None
        end_index = -1
        for word in endchar:
            end_index = msg.rfind(word)
            if end_index != -1:
                break
        if end_index == -1 or end_index <= start_index:
            return None
        return msg[start_index:end_index]

    def get_cls(self, messages):
        """Cheap prefix-based classification into 'cmd' / 'rag' / 'api' /
        'other' (much faster than asking the LLM)."""
        if messages is None:
            return 'other'
        if messages.startswith("命令"):
            return 'cmd'
        if messages.startswith("rag"):
            return 'rag'
        if messages.startswith("api"):
            return 'api'
        return 'other'

    def apitools_query(self, prompt):
        """Run *prompt* against the tools index and parse the JSON answer.

        Returns the parsed dict (keys: label / param / api-param) or None on
        any failure.
        NOTE(review): self.apitools_index is never assigned in this class --
        presumably attached elsewhere; confirm before relying on this path.
        """
        try:
            res = self.apitools_index.run(prompt)
            # Strip markdown code fences before parsing the JSON payload.
            text = res.response.replace('```json', '').replace('```', '')
            resp = json.loads(text)
            print(resp.get('label'), resp.get('param'), resp.get('api-param'))
            lb = resp.get('label')
            if lb == 'unknown':
                lb = None
            return resp
        except Exception as e:
            print(e)
        return None

    def chat(self, messages):
        """Answer *messages*: built-in help first, then the answer cache,
        then cmd/rag/api dispatch, finally a plain LLM completion."""
        apiBase.log(f"chat:{messages}")
        messages = messages.strip()
        chatResp = self.help(messages)
        if chatResp is not None:
            return chatResp
        # Entity-classification prompt fed to the tools index on dispatch.
        mes = f'<文本>{messages}</文本>.从<文本>中提取的实体的分类:"命令", "rag", "api","other"。按json格式输出:'
        # NOTE(review): has_old / call_rag / save_mes are not defined in this
        # class or file -- presumably provided elsewhere; confirm.
        chatResp = self.has_old(messages)
        if chatResp is not None:
            return chatResp
        try:
            ret = None
            cls_type = self.get_cls(messages)
            if cls_type == "cmd":
                cls = self.apitools_query(mes)
                ret = apiBase.call_cmd(cls)
            if cls_type == "rag":
                cls = self.apitools_query(mes)
                ret = self.call_rag(cls.get('param'), cls.get('api-param'))
            if cls_type == "api":
                cls = self.apitools_query(mes)
                ret = apiBase.call_api(cls.get('param'), cls.get('api-param'))
            if ret is not None:
                # Cache and return the dispatched answer (the original
                # returned chatResp, which is always None at this point).
                self.save_mes(messages, ret)
                return ret
        except Exception as e:
            # The original called the return value of log() as a function;
            # log the exception once instead.
            apiBase.log(f"chat:{e}")

        # Fall back to a plain completion and cache the answer.
        resp = self.complete("You are very powerful assistant", messages)
        apiBase.log("resp=" + resp)
        self.save_mes(messages, resp)
        return resp

    def complete(self, sys_pt, usr_pt):
        """One-shot completion of *usr_pt* under an optional system prompt."""
        if sys_pt is None:
            sys_pt = "You are an assistant for question-answering tasks"
        prompt_template = ChatPromptTemplate.from_messages([
            ("system", sys_pt),
            ("user", "{input}")
        ])
        chain = prompt_template | self.llm
        # The prompt declares an {input} variable, so the chain input must be
        # a mapping (the original passed the bare string).
        resp = chain.invoke({"input": usr_pt})
        return resp.content

    def remove_duplicates(self, text):
        """Return *text* with duplicate whitespace-separated words removed,
        keeping first occurrences in order."""
        seen = set()
        result = []
        for word in text.split():
            if word not in seen:
                seen.add(word)
                result.append(word)
        return ' '.join(result)

    def prompt_chat(self, messages):
        """Invoke the LLM with *messages* and parse its reply as JSON.

        Retries once on failure; returns the parsed dict or None.
        """
        apiBase.log(f"prompt_chat:{messages}")
        for _ in range(2):
            try:
                ret = self.llm.invoke(messages)
                # ChatOpenAI returns a message object whose payload is
                # .content (the original read ret.message.content, which does
                # not exist). Strip markdown fences before json parsing.
                text = ret.content.replace('```json', '').replace('```', '')
                print(text)
                resp = json.loads(text)
                print(resp.get('label'), resp.get('param'), resp.get('api-param'))
                return resp
            except Exception as e:
                apiBase.log(f"prompt_chat:{e}")
        return None

    def help(self, messages):
        """Handle the built-in commands (help text, LLM training, api list).

        Returns the reply string, or None when *messages* is not a built-in.
        """
        if messages.startswith("帮助") or messages.startswith("系统帮助"):
            return '''1:输入:[命令]开头.例如命令是运行test.py;2:输入:[api]开头.如api是query,参数是param1;3:输入:[api]开头.例如api列表;4:输入:[rag]开头.如rag导入文件或rag导入文件夹,参数是文件或文件夹 ;5:输入:[rag]开头.如rag查询,参数是文件路径+空格+查询内容 ;'''

        if messages.startswith("训练LLM-"):
            param = messages.split("-")
            # A well-formed request splits into at least two parts; the
            # original test was inverted and rejected every request, making
            # the success branch unreachable.
            if len(param) < 2:
                return "参数错误.格式为:训练LLM:文档内容"
            return "训练成功"
        if messages == "api列表":
            # Concatenate every file name under self.dir with the tool
            # extensions stripped (no separator, matching the original).
            names = []
            for root, dirs, files in os.walk(self.dir):
                for file in files:
                    names.append(file.replace('.hpl', '').replace('.hwf', ''))
            return "".join(names)
        return None

    def call_class(self, input_text):
        """Ask the LLM to extract the entities of *input_text* (returns the
        parsed JSON dict from prompt_chat, or None)."""
        ctx = """你是一个文本实体识别领域的专家"""
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", ctx),
                ("user", f'<文本>{input_text}</文本>.从提取<文本>中的实体.'),
            ]
        )
        return self.prompt_chat(prompt)

    def load_hpl_function(self, dir, tool_name):
        """Build a LangChain Tool from one .hpl (pipeline XML) or python file.

        SECURITY: the wrapper is produced via exec(); only trusted tool files
        may be loaded.
        """
        path = dir + "/" + tool_name
        # Split name and extension BEFORE discarding the extension; the
        # original stripped the extension first and then indexed [1] on the
        # extension-less name, which always raised IndexError.
        parts = tool_name.split(".")
        tool_name = parts[0]
        ext = parts[1] if len(parts) > 1 else ""
        if ext == "hpl":
            # .hpl files are XML pipelines; use their description element.
            tree = ET.parse(path)
            root = tree.getroot()
            element = root.find('info/description')
            desc = element.text
            if desc is None:
                desc = tool_name
            code = f'''
def {tool_name}(param:str) -> str:
    print('{tool_name} == >'+param)
    return apiBase.call_pip('{path}',param)
'''
        else:
            # Anything else is executed as a python file.
            desc = tool_name
            code = f'''
def {tool_name}(param:str) -> str:
    print('{tool_name} == >'+param)
    return apiBase.run_python_file('{path}',param)
'''
        # exec into an explicit namespace: function-local exec followed by
        # eval() of the new name is unreliable (and breaks under PEP 667).
        ns = {}
        exec(code, globals(), ns)
        fun = ns[tool_name]
        return Tool(name=tool_name, func=fun, description=desc, args_schema=PrimeInput)

    def load_hpl_dir(self, tlpath):
        """Load every tool file under *tlpath* ($PROJECT_HOME is expanded).

        Returns the list of Tools found directly in the directory, or None
        when the path is not a directory.
        """
        template = Template(tlpath)
        toolspath = template.substitute(PROJECT_HOME=os.environ["PROJECT_HOME"])
        # The original combined "not exists() and isdir()", which can never
        # both hold; a single isdir() check covers missing paths too.
        if not os.path.isdir(toolspath):
            return None
        self.tools_path = toolspath
        agent_tools = []
        if toolspath.endswith('/'):
            toolspath = toolspath[:-1]
        for name in os.listdir(toolspath):
            file = toolspath + "/" + name
            if os.path.isfile(file):
                agent_tools.append(self.load_hpl_function(toolspath, name))
            else:
                # NOTE(review): tools found in subdirectories are loaded but
                # their Tool objects are discarded, exactly as the original
                # did -- confirm whether they should be collected instead.
                self.load_hpl_dir(file)
        return agent_tools

    def agent_query(self, agent_executor, params):
        """Invoke *agent_executor* with *params*; on failure return the error
        message string instead of raising."""
        try:
            ret = agent_executor.invoke({"input": params})
        except Exception as e:
            ret = str(e)
        return ret

    def dir_abstract(self, hplspath, clean=False):
        """Recursively summarize every .hpl file under *hplspath* into the
        vector store; *clean* drops the abstract collection first.

        NOTE(review): cn_abstractname (commented out in __init__) and
        file_abstract are not defined in this file -- confirm they exist
        before enabling this code path.
        """
        if hplspath.endswith('/'):
            hplspath = hplspath[:-1]
        if clean:
            try:
                self.vect_client.delete_collection(name=self.cn_abstractname)
            except Exception:
                # Best-effort cleanup: a missing collection is not an error.
                pass
        for name in os.listdir(hplspath):
            if os.path.isdir(hplspath + "/" + name):
                self.dir_abstract(hplspath + "/" + name, False)
                continue
            if name.endswith('.hpl'):
                self.file_abstract(hplspath + "/" + name)

    def load_api_tools(self, api_url, api_name):
        """Build Tools for ';'-separated REST endpoints paired with names.

        Returns None when fewer names than urls are supplied.
        """
        apis = api_url.split(';')
        names = api_name.split(';')
        if len(names) < len(apis):
            return None
        agent_tools = []
        # zip pairs each url with its name; the original while-loop forgot to
        # advance its index before `continue` on a blank name and spun forever.
        for url, name in zip(apis, names):
            name = name.strip()
            if name == '':
                continue
            code = f'''
def {name}(param:str) -> str:
    print("{name} == >"+param)
    return apiBase.call_api('{url}',param)
'''
            ns = {}
            exec(code, globals(), ns)
            fun = ns[name]
            agent_tools.append(Tool(name=name, func=fun, description=name,
                                    args_schema=PrimeInput))
        return agent_tools

    def load_python_tools(self, func):
        """Wrap the python source *func* (containing one def) in a Tool list.

        SECURITY: *func* is exec()ed verbatim -- only trusted code may be
        passed. Returns None when no function definition is found (the
        original raised AttributeError on the failed regex match).
        """
        match = re.search(r'def\s+(\w+)\(', func)
        if match is None:
            return None
        func_name = match.group(1)
        ns = {}
        exec(func, globals(), ns)
        fun = ns[func_name]
        return [Tool(name=func_name, func=fun, description=func_name,
                     args_schema=PrimeInput)]

    def create_agent(self, agent_tools):
        """Create a ReAct AgentExecutor over *agent_tools* using the project
        prompt templates; returns None when no tools are given."""
        if agent_tools is None:
            return None
        sys_prompt = apiBase.prompt_read("aigc/langchain-agent/system")
        user_prompt = apiBase.prompt_read("aigc/langchain-agent/user")
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", sys_prompt),
                ("user", user_prompt),
            ]
        )
        agent = create_react_agent(self.llm, agent_tools, prompt)
        self.agent_executor = AgentExecutor(agent=agent, tools=agent_tools,
                                            verbose=False, stream_runnable=False)
        return self.agent_executor

    def load_langgraph_node(self, tools, dir, tool_name):
        """Register a LangGraphNode for *tool_name* (only .hpl files) into
        the *tools* dict, keyed by the extension-less name."""
        if not tool_name.endswith('.hpl'):
            return
        tool_name = tool_name.split(".")[0]
        tools[tool_name] = LangGraphNode(dir, tool_name)

    def langgraph_add_nodes(self, name_dict, edge_name, builder, node_tools):
        """Add *edge_name* as a graph node exactly once, skipping the special
        __start__/__end__ markers and conditional-edge target lists."""
        if isinstance(edge_name, list):
            return
        if edge_name == "__start__":
            return
        if edge_name == "__end__":
            return
        if edge_name in name_dict:
            return
        name_dict[edge_name] = edge_name
        builder.add_node(edge_name, node_tools[edge_name])

    def load_langgraph_json(self, tlpath):
        """Build and compile a LangGraph graph from a tool directory: one
        node per .hpl file plus an edges.json describing plain and
        conditional edges ($PROJECT_HOME in *tlpath* is expanded)."""
        template = Template(tlpath)
        toolspath = template.substitute(PROJECT_HOME=os.environ["PROJECT_HOME"])
        if toolspath.endswith('/'):
            toolspath = toolspath[:-1]
        node_tools = {}
        for name in os.listdir(toolspath):
            if os.path.isfile(toolspath + "/" + name):
                self.load_langgraph_node(node_tools, toolspath, name)

        with open(toolspath + "/edges.json", 'r', encoding='utf-8') as file:
            edges = json.loads(file.read())

        name_dict = {}
        builder = StateGraph(LangGraphState)

        for edge in edges["add_edges"]:
            self.langgraph_add_nodes(name_dict, edge["from"], builder, node_tools)
            self.langgraph_add_nodes(name_dict, edge["to"], builder, node_tools)
            if edge["from"] == "__start__":
                edge["from"] = START
            if edge["to"] == "__end__":
                edge["to"] = END
            builder.add_edge(edge["from"], edge["to"])

        for edge in edges["add_conditional_edges"]:
            self.langgraph_add_nodes(name_dict, edge["from"], builder, node_tools)
            with open(toolspath + "/" + edge["should_continue"], 'r',
                      encoding='utf-8') as file:
                code = file.read()
            # exec into an explicit namespace instead of relying on
            # function-local exec + eval (fragile; breaks under PEP 667).
            # SECURITY: only trusted condition files may be loaded.
            ns = {}
            exec(code, globals(), ns)
            builder.add_conditional_edges(edge["from"], ns['should_continue'],
                                          edge["next"])
        return builder.compile()

    def load_langgraph_dir(self, tlpath):
        """Alias of load_langgraph_json (the two methods were duplicated
        verbatim in the original; kept for backward compatibility)."""
        return self.load_langgraph_json(tlpath)
    
# Module-level singleton; importing this module constructs the shared instance.
apiTools = ApiTools()
#agent=apiTools.load_langgraph_dir('$PROJECT_HOME/88api/langgraph')
#agent.invoke({"aggregate": [], "which": "bc"})
# apiTools.cut_msg("命令是查看会议.md文件",["查看"],[".md"])
# apiTools.chat("hi")

