import os
from LLMService import LLMService
from typing import List, Optional, Dict, Any, Tuple, Callable, Iterator,Union
from markdown_it import MarkdownIt
from markdown_it.tree import SyntaxTreeNode
from termcolor import colored
import re
import math
from classDef import *
from sqlalchemy.orm import Session
from sqlalchemy.orm.attributes import flag_modified
from flask import g, has_request_context
import warnings
from permissionMat import permissionMat,P
from visitPCfs import visitPCfs
warnings.filterwarnings("ignore", category=DeprecationWarning)

class ftree:
    """
    ftree 类包含了对 fnode 的管理逻辑，以及静态链表 (SLL) 和动态规划 (DP) 的搜索方式。
    - _static_tree: 静态存在的节点列表，用于保存整棵树的所有节点信息（SLL形式）
    - _user: 当前登录/操作用户的 ID
    """
    def __init__(self, session: Optional[Session] = None, use_content_vec: bool = False):
        """Set up the tree manager: choose a DB session, wire helper objects, ensure the root exists."""
        self.USE_CONTENT_VEC = use_content_vec
        # (user, node_id) -> pending approval state for agent operations.
        self.pendingPermissions: Dict[Tuple[str, int], str] = {}
        # Session resolution order: explicit argument > Flask request session > fresh session.
        if session is not None:
            self.session = session
        elif has_request_context():
            self.session = g.db_session
        else:
            self.session = get_session()
        self.affine = visitPCfs()  # only used when setting permissions
        self.err = ErrorCode(self.session)
        self.PM = permissionMat(self.err)
        # Make sure the root node exists before any other operation runs.
        self.root()
    def IF_MAIN_COMMIT(self, id: int) -> Dict[str, Union[str, int]]:
        """Commit only when running as a script and `id` is a valid (non-negative) node id; always return SUCCESS(id)."""
        if id >= 0 and __name__ == "__main__":
            self.session.commit()
        return self.err.SUCCESS(id)
    def root(self) -> fnode:
        """Fetch the root node (id == 1) from the DB; create and commit a fresh one if missing."""
        existing = self.session.query(fnode).where(fnode.id == 1).first()  # type: ignore
        if existing:
            return existing
        new_root = fnode(content="root", node_type="space")
        setattr(new_root, '_extra_info', "")   # records which summary type was generated
        setattr(new_root, '_sim_score', [])
        self.session.add(new_root)
        self.session.commit()
        return new_root
    def delUser(self, user: str) -> Dict[str, Union[str, int]]:
        """
        Remove `user` from every node's permission map.
        Does not delete the spaces/files themselves (only affine-driven removal does that).
        """
        for node in self.session.query(fnode).all():  # type: ignore
            if user in node.permission:
                del node.permission[user]
                # In-place dict mutation must be flagged so SQLAlchemy persists it.
                flag_modified(node, "permission")
        return self.err.IF_SUCCESS_COMMIT(1)
    def addUser(self, user: str) -> Dict[str, Union[str, int]]:
        """
        Grant `user` OWNER|AGENT_READ on the root node if they have no entry yet.

        :param user: user id to register
        :return: success dict carrying the user's (possibly pre-existing) permission on root
        """
        root = self.root()
        if root.permission.get(user, -1) == -1:
            try:
                root.permission[user] = P.OWNER | P.AGENT_READ
                # Fix: in-place mutation of the JSON/dict column is not tracked by
                # SQLAlchemy, so without this the grant may never be persisted.
                # Consistent with setPerm/delUser, which always flag the column.
                flag_modified(root, "permission")
            except Exception as e:
                return format_error(e)
        return self.err.IF_SUCCESS_COMMIT(root.permission.get(user, -1))
    def pNodeId(self, nodeOrId: Union[int, fnode]) -> str:
        """Return the node id (or the raw value) as a string, printing a warning when it is not numeric."""
        if isinstance(nodeOrId, (int, str)):
            rtnstr = str(nodeOrId)
        else:
            rtnstr = str(nodeOrId.id)
        try:
            int(rtnstr)
        except Exception:
            print(colored(f"error: pNodeId {rtnstr} is not an integer.Original sliced str:{str(nodeOrId)[0:100]}","red"))
        return rtnstr
    def setPerm(self, node: fnode, user: str, permission: int, method: str):
        """
        Update `user`'s permission bits on `node`; success is implied by not raising.
        Flags the column dirty so a later session.commit() persists the change.
        """
        old_perm = node.permission.get(user, P.NONE)
        if method == "add":
            node.permission[user] = old_perm | permission
        elif method == "override":
            node.permission[user] = permission
        elif method == "clear":
            node.permission[user] = old_perm & ~permission
        else:
            raise ValueError(self.err.INVALID_PARAM(f"method {method} 不在支持范围内:add  override  clear"))
        flag_modified(node, "permission")
        
    def getsim(self, node: fnode) -> List[float]:
        """Return the node's cached similarity scores; fall back to [0] when absent or empty."""
        try:
            scores = getattr(node, '_sim_score', [0])
            return scores or [0]
        except Exception:
            setattr(node, '_sim_score', [])
            return [0]
    def getextra(self, node: fnode) -> str:
        """Return the node's cached extra-info string; fall back to "" when absent."""
        try:
            info = getattr(node, '_extra_info', "")
        except Exception:
            setattr(node, '_extra_info', "")
            info = ""
        return info
    def permission_in_the_loop(self, user: str, fsNodeOrId: Union[int, fnode], Operation: str, by: str = "user", processType: Optional[List[str]] = None) -> fnode:
        """
        Permission check with a human-in-the-loop fallback for agents.

        If the plain check (ExistAndPerm) fails for a user-initiated call, the
        user genuinely lacks permission and the error propagates.  For an
        agent-initiated call the failure may only be the missing AGENT_WRITE
        bit: re-check with the user's own rights, and if that succeeds ask the
        user to approve the agent's operation (via pendingPermissions inside a
        Flask request, or via input() otherwise).
        readDoc gets no retry because during RAG the agent never sees node ids.

        :raises ValueError: node missing / insufficient rights / user rejected
            the agent / approval still pending (dict payload with the negated
            node id and message "PERMISSION_IN_THE_LOOP").
        """
        # Fix: avoid a shared mutable default argument; the effective default
        # ["space", "doc"] is unchanged.
        if processType is None:
            processType = ["space", "doc"]
        try:
            return self.ExistAndPerm(user, fsNodeOrId, Operation, by, processType=processType)
        except ValueError as e1:
            if by == "user":
                print(colored(f"error:by user-> user permission is not enough.can not find node with permission for operation {Operation}","red"))
                raise e1
            # Agent path: would the user themselves be allowed?
            fsNode_as_user: fnode = self.ExistAndPerm(user, fsNodeOrId, Operation, "user", processType=processType)
            if has_request_context():
                state = self.pendingPermissions.get((user, fsNode_as_user.id), "")
                if state == "resolved":
                    return fsNode_as_user
                elif state == "rejected":
                    raise ValueError(self.err.USER_REJECTED(f"{str(e1)} .user rejected agent {Operation} {self.pNode(fsNode_as_user)}"))
                else:
                    # Signal the frontend to ask the user; negative id marks the node.
                    raise ValueError({"success":-int(fsNode_as_user.id),"message":"PERMISSION_IN_THE_LOOP"})
            else:
                # No request context (CLI/tests): ask on stdin.
                if input(f"do you want to approve agent {Operation} on {self.pNode(fsNode_as_user)}?(y/n)")=="y":
                    return fsNode_as_user
                else:
                    raise ValueError(self.err.USER_REJECTED(f"{str(e1)} .user rejected agent {Operation} {self.pNode(fsNode_as_user)}"))
    def addNode(self, user: str, parentNodeOrId: Union[int, fnode], content: str, node_type: str, by: str = "user") -> Dict[str, Union[str, int]]:
        """
        Create a new node under the given parent (needs "create" permission on a
        space node) and return a success dict carrying the new node's id.
        """
        try:
            parent_node = self.permission_in_the_loop(user, parentNodeOrId, "create", by, processType=["space"])
        except ValueError as e:
            return format_error(e)
        if not parent_node:
            print(colored(f"error:can not find parent node that meets permission {parentNodeOrId}","red"))
            return self.err.NO_PERMISSION()
        new_node = fnode(content=content, node_type=node_type, parent_node=parent_node)
        setattr(new_node, '_extra_info', "")  # records which summary type was generated
        setattr(new_node, '_sim_score', [])
        self.session.add(new_node)
        # parent_node is known truthy here (guard above), so attach directly.
        parent_node.children.append(new_node)
        self.extendPerm(new_node, parent_node, user, by)
        return self.IF_MAIN_COMMIT(new_node.id)
    def extendPerm(self,add_node,parent_node,user,by):
        """
        Give a freshly created/moved/copied node its permissions:
        - for docs, every other user inherits their space permission with the
          OWNER_ONLY bits cleared;
        - the acting user gets the parent permission plus OWNER (and AGENT_READ
          when an agent created the node, so the agent can see what it made).
        """
        if add_node.node_type == "doc":
            for other in parent_node.permission:
                if other == user:
                    continue
                inherited = parent_node.permission.get(other, P.NONE)
                self.setPerm(add_node, other, inherited, method="add")
                self.setPerm(add_node, other, P.OWNER_ONLY, method="clear")
        extra = P.OWNER | P.AGENT_READ if by != "user" else P.OWNER
        self.setPerm(add_node, user, parent_node.permission.get(user, P.NONE) | extra, "add")
        self.post_change_hook(add_node)
    def buildMdTree_BE(self, user: str, dirNodeOrId: Union[int, fnode], filePath: str, by: str = "user") -> Dict[str, Union[str, int]]:
        """
        Backend-test helper: read a markdown file from the server filesystem,
        register it as a doc node under dirNodeOrId and build its tree.
        """
        file_name = os.path.basename(filePath)
        created = self.addNode(user, dirNodeOrId, file_name, "doc", by)
        new_id = int(created.get("success", -1))
        if new_id < 0:
            # addNode failed; propagate its error dict unchanged.
            return created
        fileNode = self._findNodeById(new_id)
        if not fileNode:
            return self.err.NODE_NOT_FOUND(f"buildMdTree_BE: {self.pNode(fileNode)}, fileNode not found",new_id)
        with open(filePath, 'r', encoding='utf-8') as md_file:
            content = md_file.read()
        rtnDict = self.buildMdTree(user, new_id, content, by)
        return self.err.IF_SUCCESS_COMMIT(rtnDict)
    
    @timeit("light_cyan")
    def buildMdTree(self, user: str, fileNodeOrId: Union[int,fnode], content: str, by: str="user") -> Dict[str, Union[str,int]]:
        """
        Parse markdown text into a tree of fnode rows under an existing doc node
        (the current user needs write permission on it).

        Overall flow:
        1. check permission and resolve file_node;
        2. parse the markdown with markdown-it-py into tokens, then a SyntaxTreeNode;
        3. recursively create and attach child rows via _parseMdSubTree(root, file_node);
        4. flush, trim/hierarchize the raw nodes, sync the static tree and
           propagate the user's permission down the new subtree;
        5. return a success dict carrying file_node.id.
        """

        # First check permission on fileNodeOrId.
        try:
            file_node = self.permission_in_the_loop(user, fileNodeOrId, "write", by, processType=["doc"])
        except ValueError as e:
            return format_error(e)
        # Parse the incoming content with markdown-it-py.
        md = MarkdownIt()
        env: Dict[str, Any] = {}
        tokens = md.parse(content, env)
        syntax_root = SyntaxTreeNode(tokens)

        # Recursive helper: given a syntax node and a DB parent, insert DB rows for its children.
        def _parseMdSubTree(md_node: SyntaxTreeNode, parent: fnode, md_counter: List[int]) -> None:
            """
            Walk md_node's children and create a matching fnode row for each.
            - md_node: a node of the parsed markdown tree;
            - parent: the DB parent the new rows attach to;
            - md_counter: single-element list used as a mutable mdSeq counter
              shared across recursive calls (closure-writable).
            """
            for child in md_node.children:
                new_node = fnode(
                    content=child.content or "",
                    node_type=child.tag,
                    parent_node=parent,
                    mdSeq=md_counter[0],
                    start_line=child.map[0] if child.map else 0,
                    end_line=child.map[1] if child.map else 0
                )
                setattr(new_node, '_extra_info', "")  # records which summary type was generated
                setattr(new_node, '_sim_score', [])
                md_counter[0] += 1  # advance the document-order counter

                self.session.add(new_node)
                parent.children.append(new_node)
                _parseMdSubTree(child, new_node, md_counter)

        # Write the whole parsed tree into the DB under file_node.
        _parseMdSubTree(syntax_root, file_node, [0])
        self.session.flush()
        self.trim_md_nodes(file_node)
        self.hierarchize_md_nodes(file_node)
        self.syncStaticTree()
        if not file_node.parent:
            return self.err.NODE_NOT_FOUND(f"buildMdTree: {self.pNode(file_node)}, its parent not found",file_node.id)
        try:
            self.setReculPermission(user,P.NONE,user,file_node.parent.permission.get(user,P.NONE),nodeOrId=file_node)
        except Exception as e:
            return format_error(e)
        self.post_change_hook(file_node)  # file_node itself has no summary now
        return self.IF_MAIN_COMMIT(file_node.id)

        

    def trim_md_nodes(self, node: fnode):
        """
        Flatten the raw markdown-it node tree:
        - 'p'/'h1'..'h6': collapse to the first child's content and drop all children;
        - 'ul'/'ol': splice their children into the parent's child list, removing the wrapper;
        - 'li' (second pass): merge the first child's content into the li node itself.
        """
        stack = [node]
        while stack:
            current = stack.pop()
            if current.node_type in ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'] and current.children:
                # Keep only the first child's text as this node's content.
                current.content = current.children[0].content
                if len(current.children)>1:
                    print("warning: p/hn节点",current.id,"有多个孩子，只保留第一个孩子",self.pNodeList(current.children))#type: ignore
                current.children = []
            elif current.node_type in ["ul", "ol"]:
                # Promote list children into the parent, removing the ul/ol wrapper.
                if current.parent:
                    index = current.parent.children.index(current)
                    current.parent.children[index:index+1] = current.children
                    # Re-point the promoted children at their new parent.
                    for child in current.children:
                        child.parent = current.parent
            stack.extend(current.children)
        stack = [node]
        while stack:
            current = stack.pop()
            # Fix: guard against childless 'li' nodes — the unconditional pop(0)
            # raised IndexError on an empty bullet item.
            if current.node_type == "li" and current.children:
                firstchild = current.children.pop(0)
                current.content += firstchild.content
            stack.extend(current.children)
        self.session.flush()

    def hierarchize_md_nodes(self, node: fnode):
        """
        Rebuild nesting of a flat sibling list so heading levels become
        parent/child links while the pre-order traversal order is preserved.
        E.g. the child sequence [h1,p,h2,h2,p,h3,h4] is regrouped so each node
        nests under its nearest preceding higher-priority sibling.

        :param node: root of the subtree whose hierarchy should be rebuilt
        """

        def get_priority(node: fnode) -> int:
            # priority_map comes from a wildcard import (presumably classDef);
            # assumed smaller value = higher heading level — TODO confirm.
            # Unknown node types sink to priority 100 (lowest).
            return priority_map.get(node.node_type, 100)

        def process_children(parent: fnode):
            if len(parent.children) <= 1:
                return
            # Restore document order before regrouping.
            parent.children.sort(key=lambda x: x.mdSeq)
            original_children = parent.children.copy()
            # The first child starts as the current section header.
            current_header = original_children[0]
            current_priority = get_priority(current_header)
            # Walk the remaining siblings in document order.
            i = 1
            while i < len(original_children):
                child = original_children[i]
                child_priority = get_priority(child)
                if child_priority > current_priority:
                    # Lower-priority node: move it under the current header.
                    parent.children.remove(child)
                    # Appending keeps the header's children in document order.
                    current_header.children.append(child)
                    child.parent = current_header
                else:
                    # Equal or higher priority: this child opens a new section.
                    current_header = child
                    current_priority = child_priority
                i += 1

        stack = [node]
        # Pre-order traversal over the (mutating) tree.
        while stack:
            current = stack.pop()
            process_children(current)
            # Push children reversed so the LIFO stack pops them in order.
            for child in reversed(current.children):
                stack.append(child)

        # self.session.commit()
        self.session.flush()

    def trim_md_nodes_after_hierarchize(self, node: fnode):
        """
        Collapse single-child chains: any non-doc node with exactly one child is
        replaced by that child, whose content/summary absorb the parent's
        (joined with ";" / ",").  NOTE(review): despite the original docstring
        this applies to every non-doc node type, not only 'li'.
        """
        stack = [node]
        while stack:
            current = stack.pop()
            if current.node_type != "doc" and len(current.children) == 1:
                # Promote the only child into current's slot under its parent.
                if current.parent:
                    child = current.children[0]
                    child.content = current.content + ";" + child.content if current.content else child.content
                    child.summary = current.summary + "," + child.summary if current.summary else child.summary
                    index = current.parent.children.index(current)
                    current.parent.children[index] = child
                    child.parent = current.parent
            # current.children still references the promoted child, so it gets visited too.
            stack.extend(current.children)
        # self.session.commit()
        self.session.flush()

    def syncStaticTree(self):
        """
        Delete every DB row not reachable from the root node.
        Sync always starts from the root, never from a freshly built file node.
        """
        reachable = set()
        pending = [self.root()]
        # Walk the tree and record every reachable node.
        while pending:
            cur = pending.pop()
            reachable.add(cur)
            pending.extend(cur.children)
        # Anything in the table but not reachable is an orphan: delete it.
        for orphan in self.session.query(fnode).all():
            if orphan not in reachable:
                self.session.delete(orphan)
        self.session.flush()

    def renameNode(self, user:str, nodeId: int, new_content: str = "",by:str="user") -> Dict[str, Union[str,int]]:
        """
        Change a node's content (requires "rename" permission).
        For file/dir nodes this only changes the title.
        """
        try:
            target = self.permission_in_the_loop(user, nodeId, "rename", by, processType=["space", "doc"])
        except ValueError as e:
            return format_error(e)
        target.content = new_content
        # Refresh downstream data without descending into children.
        self.post_change_hook(target, Descend=False)
        return self.IF_MAIN_COMMIT(target.id)

    def deleteNode(self, user: str, nodeId: int, by: str = "user") -> Dict[str, Union[str,int]]:
        """
        Delete a node (requires "delete" permission) together with its whole subtree.
        """
        try:
            target = self.permission_in_the_loop(user, nodeId, "delete", by, processType=["space", "doc"])
        except ValueError as e:
            return format_error(e)
        # Collect the subtree first, then detach and delete each node.
        doomed: List[fnode] = []
        frontier = [target]
        while frontier:
            cur = frontier.pop()
            doomed.append(cur)
            frontier.extend(cur.children)
        for victim in doomed:
            if victim.parent:
                # Unlink from the parent's child list before deleting.
                victim.parent.children.remove(victim)
            self.session.delete(victim)
        self.session.flush()
        return self.IF_MAIN_COMMIT(target.id)
    def moveNode(self, user: str, nodeId: Union[int,fnode], destParentNodeId: Union[int,fnode], by: str = "user") -> Dict[str, Union[str,int]]:
        """
        Move a doc node under a space node; needs "movefrom" permission on the
        source and "moveto" on the destination.

        :param user: id of the requesting user
        :param nodeId: source node or its id
        :param destParentNodeId: destination parent node or its id
        :param by: caller role ("agent"/"user")
        :return: result dict carrying the moved node's id on success
        """
        try:
            src = self.permission_in_the_loop(user, nodeId, "movefrom", by, processType=["doc"])
            dest = self.permission_in_the_loop(user, destParentNodeId, "moveto", by, processType=["space"])
        except ValueError as e:
            return format_error(e)
        # A move is only valid doc -> space.
        if src.node_type != "doc":
            return self.err.TYPE_NOT_MATCH("moveNode.fNodeOrId must be fileNode")
        if dest.node_type != "space":
            return self.err.TYPE_NOT_MATCH("moveNode.destParentNodeId must be dir")
        dest.children.append(src)
        src.parent = dest
        # Re-derive permissions from the new parent before flushing.
        self.extendPerm(src, dest, user, by)
        self.session.flush()
        return self.IF_MAIN_COMMIT(src.id)

    @timeit("light_cyan")
    def copyNode(self, user: str, fNodeOrId: Union[int,fnode], destDirNodeOrId: Union[int,fnode], by: str = "user") -> Dict[str, Union[str,int]]:
        """
        Copy a doc node and its whole subtree under a space node.
        Needs "copyfrom" permission on the source and "copyto" on the destination.

        :param user: id of the requesting user
        :param fNodeOrId: source node or its id
        :param destDirNodeOrId: destination parent node or its id
        :param by: caller role ("agent"/"user")
        :return: result dict carrying the copied subtree root's id on success
        """
        # Check permission on source and destination.
        try:
            fNode = self.permission_in_the_loop(user, fNodeOrId, "copyfrom", by, processType=["doc"])
            destDirNode = self.permission_in_the_loop(user, destDirNodeOrId, "copyto", by, processType=["space"])
        except ValueError as e:
            return format_error(e)
        # A copy is only valid doc -> space.
        if fNode.node_type != "doc":
            return self.err.TYPE_NOT_MATCH("copyNode.fNodeOrId must be fileNode")
        if destDirNode.node_type != "space":
            return self.err.TYPE_NOT_MATCH("copyNode.destParentNodeId must be dir")

        def _copySubTree(old_node: fnode, new_parent: fnode) -> fnode:
            """Recursively clone old_node as a new child of new_parent."""
            new_node = fnode(
                content=old_node.content,
                node_type=old_node.node_type,
                summary=old_node.summary,
                sum_vec=old_node.sum_vec,
                content_vec=old_node.content_vec,
                # Fix: copy the dict instead of sharing the reference. The later
                # extendPerm -> setPerm call mutates this dict in place, and a
                # shared reference would silently rewrite the SOURCE node's
                # permissions as well.
                permission=dict(old_node.permission),
                start_line=old_node.start_line,
                end_line=old_node.end_line,
                _extra_info=self.getextra(old_node),
                _sim_score=self.getsim(old_node),
                level=old_node.level,
                mdSeq=old_node.mdSeq
            )
            setattr(new_node, '_extra_info', "")  # records which summary type was generated
            setattr(new_node, '_sim_score', [])
            # Wire up the parent/child relationship.
            new_node.parent = new_parent
            new_parent.children.append(new_node)
            self.session.add(new_node)
            # Keep markdown order stable while copying children.
            old_node.children.sort(key=lambda x: x.mdSeq)
            for child in old_node.children:
                _copySubTree(child, new_node)
            return new_node

        copied_root = _copySubTree(fNode, destDirNode)
        # Grant the acting user (and inheriting users) permissions on the copy.
        self.extendPerm(copied_root, destDirNode, user, by)
        self.session.flush()
        return self.IF_MAIN_COMMIT(copied_root.id)

    def simpleLiteralSearch(self, user: str, text: str, use_regex: bool = False, by: str = "user") -> List[fnode]:
        """
        Literal keyword search over node content (optionally regex-based).
        The query is expanded into keywords via the LLM; nodes are fetched from
        the DB in pages of CHUNK_SIZE to bound memory use, and only nodes the
        user can read are considered.  Every visited node gets 0.0 appended to
        its _sim_score, flipped to 1.0 when it matches.
        """
        keywords = LLMService().generateSumByKeyWord(text,topK=3)
        if text not in keywords:
            keywords.append(text)
        print(keywords)
        results = []
        CHUNK_SIZE = 2000
        offset = 0
        while True:
            page = self.session.query(fnode).limit(CHUNK_SIZE).offset(offset).all()
            if not page:
                break
            for node in page:
                scores = self.getsim(node)
                scores.append(0.0)
                setattr(node, '_sim_score', scores)
                # Skip nodes the user cannot read.
                try:
                    self.PM.pm(user, node, "read",by)
                except ValueError:
                    continue
                # First matching keyword wins; record the hit and move on.
                for kw in keywords:
                    hit = re.search(kw, node.content) if use_regex else kw.lower() in node.content.lower()
                    if hit:
                        results.append(node)
                        scores = self.getsim(node)
                        scores[-1] = 1.0
                        setattr(node, '_sim_score', scores)
                        break
            offset += CHUNK_SIZE
        return results

    def SLLsearch(self, user: str, text: str, accept_cos_threshold: float, by: str = "user", vec_type: str = "content_vec") -> List[fnode]:
        """
        Flat (static-linked-list) fuzzy search: cosine similarity between the
        query embedding and each node's content_vec/sum_vec.  Nodes at or above
        the threshold are collected, sorted best-first and capped at
        max_return_len.  Nodes are read from the DB in pages to bound memory use.
        """
        max_return_len, _ = self.getDPparam(None)
        query_vec = self.embQuery([text])  # one-element batch for the query text
        candidates:List[fnode] = []
        CHUNK_SIZE = 500
        offset = 0
        while True:
            page = self.session.query(fnode).limit(CHUNK_SIZE).offset(offset).all()
            if not page:
                break
            for node in page:
                # Pick the comparison vector per vec_type; skip vector-less nodes.
                cur_vec = node.content_vec if vec_type == "content_vec" else node.sum_vec
                if not cur_vec:
                    continue
                # Skip nodes the user cannot read.
                try:
                    self.PM.pm(user, node,"read",by)
                except ValueError:
                    continue
                scores = self.getsim(node)
                scores.extend([self.cos_sim(qv,cur_vec) for qv in query_vec])
                setattr(node, '_sim_score', scores)
                if max(scores) >= accept_cos_threshold:
                    candidates.append(node)
            offset += CHUNK_SIZE
            # Enough candidates collected; later pages are unnecessary.
            if len(candidates) >= max_return_len:
                break
        candidates.sort(key=lambda x: max(self.getsim(x)), reverse=True)
        top_nodes = candidates[:max_return_len]
        # Debug output for the ranked hits.
        for idx, node in enumerate(top_nodes):
            print(f"SLL结果 {idx + 1}: {self.getsim(node)} {self.pNode(node)}")
        return top_nodes
    def embQuery(self, strList: list[str]) -> List[List[float]]:
        """
        Embed a list of strings, returning vectors in the same order as the input.

        Duplicates are embedded only once; vectors already present in the
        cache_emb table are reused, and freshly computed ones are written back
        to the cache before the result is assembled.
        """
        # Record which positions each distinct text occupies, so vectors can be
        # scattered back to the original order at the end.
        positions: Dict[str, List[int]] = {}
        for idx, text in enumerate(strList):
            positions.setdefault(text, []).append(idx)
        unique_texts = list(set(strList))

        # Reuse whatever the cache already knows.
        hits = self.session.query(cache_emb).filter(cache_emb.content.in_(unique_texts)).all()#type: ignore
        vec_by_text = {rec.content: rec.vec for rec in hits}

        # Embed only the cache misses (skip the LLM call entirely when none).
        misses = [t for t in unique_texts if t not in vec_by_text]
        if misses:
            for text, emb_vec in zip(misses, LLMService().embedding(misses)):
                # Persist the new vector and make it available locally.
                self.session.add(cache_emb(content=text, vec=emb_vec))
                vec_by_text[text] = emb_vec
            self.session.flush()

        # Scatter vectors back into the original input order.
        result_vectors: List[List[float]] = [[] for _ in strList]
        for text, idx_list in positions.items():
            vec = vec_by_text.get(text, [])
            for i in idx_list:
                result_vectors[i] = vec
        return result_vectors
    def DPsearch(self, user: str, roots: List[fnode], text: str, by: str = "user", vec_type: str = "sum_vec") -> List[fnode]:
        """
        # 子节点有读权限但父节点无读权限的问题，如果是自己创建的节点（Own)，不存在这个问题。
        如果是接受他人的分享，affine会自动解决文件节点和空间节点的问题，任何一个用户添加，对根节点都是P.OWNER，也没这个问题。
        如果自己设置context,有这个问题。so, agentRead在set时应该agentPermEscalate.
        对整棵树进行分层剪枝的语义搜索：
        1. 从传入的所有根节点开始，为每个根节点计算相似度。如果 max(_sim_score) 大于阈值，则纳入候选并继续向下递归；
        2. 每一层只保留相似度排名前 top_k 的子节点（比较方式为各自的 max(_sim_score)），再次对这些子节点递归进行同样的操作；
        3. 最终在所有满足阈值的节点中，按 max(_sim_score) 降序返回最多 max_return_len 个结果。
        :param user: 当前用户
        :param roots: 需要处理的根节点列表
        :param text: 要搜索的文本
        :param by: 操作者身份，默认为 "user"
        :param vec_type: 使用的向量类型，默认为 "sum_vec"
        :return: 返回符合条件的 fnode 列表（按相似度降序最多 max_return_len 个）
        """
        DEBUG = False
        # 生成关键词后做 embedding，得到一个 List[List[float]]，其中每个元素都是一个向量
        keywords = LLMService().generateSumByKeyWord(text,topK=3)
        if text not in keywords:
            keywords.append(text)
        query_vecs = self.embQuery(keywords)  # List[List[float]]
        if DEBUG:
            print(f"根节点数量: {len(roots)}")
        results: List[fnode] = []  # 存储符合条件的节点
        def getDPchildren(parent: fnode, vecs: List[List[float]]) -> List[fnode]:
            """
            计算父节点的所有子节点与查询向量列表 vecs 的相似度，结果保存在 child._sim_score。
            然后根据各子节点的 max(child._sim_score) 从高到低进行排序，并选取前 k 个作为下一层节点。
            """
            trimChildren: List[fnode] = []
            # 选用 sum_vec 或 content_vec 对子节点进行一次性相似度计算
            for child in parent.children:
                cur_vec = child.sum_vec if vec_type == "sum_vec" else child.content_vec
                # 对所有查询向量依次计算相似度
                if cur_vec:
                    _sim_score = self.getsim(child)
                    _sim_score.extend([self.cos_sim(qv, cur_vec) for qv in vecs])
                    setattr(child, '_sim_score', _sim_score)
                else:
                    _sim_score = self.getsim(child)
                    _sim_score.extend([0.0 for _ in vecs])
                    setattr(child, '_sim_score', _sim_score)
            k, threshold = self.getDPparam(parent)  # 动态分层参数
            if DEBUG:
                print(f"top_k: {k} threshold: {threshold} for parent {self.pNode(parent)}")
            # 将 max(child._sim_score)>=threshold 的子节点纳入候选
            for child in parent.children:
                if max(self.getsim(child)) >= threshold:
                    trimChildren.append(child)
            # 用 max(child._sim_score) 进行排序，保留前 k
            trimChildren.sort(key=lambda x: max(self.getsim(x)), reverse=True)
            if DEBUG:
                print(f"top_k={k} len={len(trimChildren)} of parent {self.pNode(parent)}")
            return trimChildren[:k]
        def dfs_prune(node: fnode, vecs: List[List[float]]):
            """
            递归搜索。此处不再对节点重复追加相似度值，只做阈值判断并将符合条件者加入 results。
            然后递归地处理符合条件的子节点（通过 getDPchildren 计算并筛选）。
            """
            try:
                self.PM.pm(user, node,"read",by)
            except ValueError as e:
                # print(e)
                return

            # 若当前节点尚未有 _sim_score，可在这里补充一次初始化（如果需要的话）。
            # 若确信在 getDPchildren 或外部已对其做过相似度计算，可直接使用即可：
            if not self.getsim(node):
                cur_vec = node.sum_vec if vec_type == "sum_vec" else node.content_vec
                if cur_vec:
                    _sim_score = self.getsim(node)
                    _sim_score.extend([self.cos_sim(qv, cur_vec) for qv in vecs])
                    setattr(node, '_sim_score', _sim_score)
                else:
                    _sim_score = self.getsim(node)
                    _sim_score.extend([0.0 for _ in vecs])
                    setattr(node, '_sim_score', _sim_score)

            # 获取当前节点自身阈值
            _, threshold = self.getDPparam(node)
            node_score = max(self.getsim(node)) if self.getsim(node) else 0.0

            # 若当前节点超过阈值或者为根节点 id==1，则纳入结果
            if node_score >= threshold or node.id == 1:
                results.append(node)

            # 继续递归处理子节点
            top_children = getDPchildren(node, vecs)
            for child in top_children:
                dfs_prune(child, vecs)

        # 从所有根节点开始递归搜索
        for root in roots:
            dfs_prune(root, query_vecs)

        # 最终过滤、排序并截断到 max_return_len
        max_return_len, threshold_return = self.getDPparam(None)
        # 过滤掉相似度过低的节点
        results = [n for n in results if (max(n._sim_score) if n._sim_score else 0.0) >= threshold_return]

        # 按 max(_sim_score) 降序排序
        results.sort(key=lambda x: max(x._sim_score) if x._sim_score else 0.0, reverse=True)

        if DEBUG:
            for idx, node in enumerate(results[:max_return_len]):
                print(f"DP结果 {idx + 1}:  {self.pNode(node)}")

        return results[:max_return_len]
    def ExistAndPerm(self, user:str,nodeOrId: Union[int,fnode],operation:str,by:str,
                     processType: Optional[List[str]] = None, 
                     excludeType: Optional[List[str]] = None,) -> fnode:
        """
        ### const
        ## If this returns without raising, the result is a valid, permitted node.
        Generic guard used before node operations. Checks, in order:
        1. The node exists (nodeOrId resolves via _findNodeById).
        2. The acting user holds `operation` permission (delegated to PM.pm).
        3. The node type satisfies the include/exclude constraints.

        Permission checking goes through the PM layer with the caller's own
        identity, so only the caller's own permission can be verified here —
        not an arbitrary other user's.
        :param user: id of the acting user
        :param nodeOrId: node object or node id to resolve
        :param operation: operation name for the permission matrix (e.g. "read")
        :param by: actor kind for the permission check ("user" or "agent")
        :param processType: allowed node types; omit for no restriction
        :param excludeType: forbidden node types; omit for no restriction
        :return: the resolved fnode; raises ValueError on any failed check
        """
        # print(colored(f"ExistAndPerm -user {user} -nodeOrId {self.pNodeId(nodeOrId)} -oper {operation} -by {by} -p {processType} -ex {excludeType}",'light_cyan'))
        # Resolve first: every later check needs the concrete node object.
        node = self._findNodeById(nodeOrId)
        if node is None:
            print(colored(f"error: node {nodeOrId} not found. caller info: {operation}", 'red'))
            raise ValueError(self.err.NODE_NOT_FOUND(f"node {nodeOrId} not found. caller info: {operation}\n {self.pNode(node)}",int(self.pNodeId(nodeOrId))))
        self.PM.pm(user,node, operation,by)
        if processType and (node.node_type not in processType):
            print(colored(f"error: node type {node.node_type} not in processType {processType}. caller info: {operation}", 'red'))
            raise ValueError(self.err.TYPE_NOT_MATCH(f"node type {node.node_type} not in processType {processType}. caller info: {operation}\n {self.pNode(node)}"))
        if excludeType and (node.node_type in excludeType):
            print(colored(f"error: node type {node.node_type} in excludeType {excludeType}. caller info: {operation}", 'red'))
            raise ValueError(self.err.TYPE_NOT_MATCH(f"node type {node.node_type} in excludeType {excludeType}. caller info: {operation}\n {self.pNode(node)}"))
        # print(colored(f"pNode: {self.pNode(node)}",'light_cyan'))
        return node
    
    def treeListMdseq(self, includeSet: set[fnode]=set(), node: Optional[fnode] = None) -> List[fnode]:
        """
        Collect the subtree (directories plus the files inside them) in
        pre-order, expanding each file into the file node followed by all of
        its markdown descendants ordered by mdSeq.

        - When node is None the walk starts at the tree root.
        - At each level, directories ("space") are visited before files
          ("doc"), both sorted by id.
        - When includeSet is non-empty, only members of that set survive the
          sibling / markdown-descendant filtering (the starting node itself
          is always emitted).
        """
        if node is None:
            node = self.root()
        if node is None:
            return []
        collected: List[fnode] = []

        def keep(candidates: List[fnode]) -> List[fnode]:
            # Restrict to includeSet members when a filter set was supplied.
            if not includeSet:
                return candidates
            return [c for c in candidates if c in includeSet]

        def expandFile(fileNode: fnode) -> List[fnode]:
            # A file expands to itself plus its markdown descendants; the
            # file node carries mdSeq == -1, so it naturally sorts first.
            flat = list(self.collect_all_descendants(fileNode))
            flat.sort(key=lambda n: n.mdSeq)
            return keep(flat)

        def walkDir(cur_dir: fnode):
            collected.append(cur_dir)
            dirs = sorted(keep([c for c in cur_dir.children if c.node_type == "space"]), key=lambda n: n.id)
            files = sorted(keep([c for c in cur_dir.children if c.node_type == "doc"]), key=lambda n: n.id)
            for d in dirs:
                walkDir(d)
            for f in files:
                collected.extend(expandFile(f))

        walkDir(node)
        return collected

    def treeListPre(self, includeSet: set[fnode]=set(), node: Optional[fnode] = None) -> List[fnode]:
        """
        Pre-order traversal of the tree, returning nodes in printable order.

        Children are sorted in place by mdSeq before descending; when
        includeSet is non-empty only children contained in it are visited
        (the starting node itself is always included).
        """
        if node is None:
            node = self.root()
        if node is None:
            return []
        ordered: List[fnode] = []

        def visit(curr: fnode):
            ordered.append(curr)
            # NOTE: sorting mutates the children collection on purpose,
            # mirroring the mdSeq ordering used elsewhere in the class.
            curr.children.sort(key=lambda c: c.mdSeq)
            for child in curr.children:
                if includeSet and child not in includeSet:
                    continue
                visit(child)

        visit(node)
        return ordered


    def pNode(self, node: Optional[fnode], summaryContent: str = "0:50",style:str="") -> str:
        """
        Render one node as a display string; also (re)assigns node.level from
        the node's depth in the tree.

        :param summaryContent: "summary_len:content_len" character limits for
            the verbose form; a non-numeric field means effectively unlimited
            (10000); a malformed spec falls back to 50:50.
        :param style: when it contains "c", emit the verbose colored line
            (id, mdSeq, type, permissions, similarity, line span, summary,
            content); otherwise a compact {type,id,...} form per node_type.
        :return: "" when node is None.
        NOTE(review): the original docstring mentioned a `by` parameter that
        does not exist on this method.
        """
        # Split summaryContent into the two display-length limits.
        sc=summaryContent.split(":")
        if len(sc)!=2:
            summary_len=50
            content_len=50
        else:
            if not str(sc[0]).isdigit():
                summary_len=10000
            else:
                summary_len=int(sc[0])
            if not str(sc[1]).isdigit():
                content_len=10000
            else:
                content_len=int(sc[1])
        def pPerm(perm:Dict[str,int])->str:
            # Format the permission dict as consecutive [user=>bitmask] tags.
            rtnstr=""
            for u,p in perm.items():
                rtnstr+=f"[{u}=>{p:08b}]"
            return rtnstr
        def pSimScore(node: fnode) -> str:
            # Best recorded similarity as "x.xx", or "" when none exists.
            if not node:
                return ""
            sim_val:Optional[List[float]] = None
            try:
                sim_val = self.getsim(node)
            except Exception as e:
                # getsim can fail when the transient attribute is missing;
                # initialise it to an empty score list and retry.
                if sim_val is None:
                    sim_val = []
                    setattr(node, '_sim_score', [])
                    sim_val = self.getsim(node)
            if not sim_val:
                return ""
            return f"{max(sim_val):.2f}"
        def pLevel(node: fnode) -> int:
            """
            Depth of the node (number of ancestors), used for indentation.
            """
            level = 0
            current = node
            while current.parent:
                level += 1
                current = current.parent
            return level
        if node is None:
            return ""
        ex_info = self.getextra(node)
        level = pLevel(node)
        node.level=level
        permissions_str =pPerm(node.permission or {})
        # Color encodes how the node entered the result set:
        # exact match (cyan), search hit (blue), ensure-returned (yellow).
        pColor = "light_grey"
        if pSimScore(node) == "1.00":
            pColor = "cyan"
        elif ex_info == "search":
            pColor = "blue"
        elif ex_info == "ensure":
            pColor = "light_yellow"
        if "c" in style:
            # Only insert the summary/content line break when both parts are non-empty.
            ln=(("\n"+('     ' * level)) if len(node.summary[:summary_len])*len(node.content[:content_len])>0 else '')
            return colored(f"""{'    |' * level}[{node.id}-{node.mdSeq}:{node.node_type} {ex_info} {permissions_str} {pSimScore(node)} ({node.start_line}-{node.end_line})]{colored(node.sum_vec, 'red') if not node.sum_vec else ''}{node.summary[:summary_len]}{ln}{node.content[:content_len]}""",pColor)#type: ignore
        else:
            if node.node_type == "doc":
                return f"{'   |' * level}{{type:{node.node_type},id:{node.id},name:{node.content},summary:{node.summary}}},"
            elif node.node_type == "space":
                return f"{'   |' * level}{{type:{node.node_type},id:{node.id},name:{node.content}}},"
            else:
                return f"{'   |' * level}{{type:{node.node_type},content:{node.content}}},"
    def getPath(self, node: fnode) -> tuple[str,str]:
        """
        Walk up from `node` to the root and report the enclosing names.

        :return: (spaceName, docName) — content of the nearest enclosing
            "space" ancestor (excluding the root, id 1) and of the enclosing
            "doc" ancestor; empty strings when absent.
        """
        docName, spaceName = "", ""
        cursor: Optional[fnode] = node
        while cursor is not None:
            if cursor.node_type == "doc":
                docName = cursor.content
            elif cursor.node_type == "space" and cursor.id != 1:
                spaceName = cursor.content
            cursor = cursor.parent
        return (spaceName, docName)
    def pNodeList(self, nodeList: List[Union[fnode,int]], summaryContent: str = "0:50",style: str="cat") -> str:
        """
        Render a list of nodes (or node ids) as text.

        :param nodeList: fnode objects and/or integer ids; ids are resolved
            via _findNodeById and unresolvable ids are dropped.
        :param summaryContent: "summary_len:content_len" spec forwarded to pNode.
        :param style: when it contains "t", one pNode() line per node;
            otherwise a JSON array of per-node dictionaries.
        :return: the rendered string ("" or "[]" for empty input, per style).
        """
        # Resolve every integer id to an fnode, dropping lookup misses.
        # (The previous version indexed nodeList[0] — an IndexError on an
        # empty list — and only inspected the first element's type, so a
        # mixed int/fnode list was mishandled.)
        resolved: List[fnode] = []
        for item in nodeList:
            if isinstance(item, fnode):
                resolved.append(item)
            else:
                found = self._findNodeById(item)
                if found is not None:
                    resolved.append(found)
        nodeList = resolved
        if "t" in style:
            return "\n".join([self.pNode(node, summaryContent,style) for node in nodeList])
        else:
            # Convert fnode objects into JSON-serializable dictionaries.
            serializable_list = []
            for node in nodeList:
                spaceName, docName = self.getPath(node)
                serializable_list.append({
                    'id': node.id,
                    'content': node.content,
                    'node_type': node.node_type,
                    'level': node.level,
                    '_sim_score': max(self.getsim(node)) if self.getsim(node) else 0.0,
                    'start_line': node.start_line,
                    'end_line': node.end_line,
                    'result_type': self.getextra(node),
                    'spaceName': spaceName,
                    'docName': docName  # if space then here is empty
                })
            return json.dumps(serializable_list, indent=1, ensure_ascii=False)
    def collect_all_descendants(self, node: fnode) -> List[fnode]:
        """
        Return `node` followed by every descendant, depth-first.

        Children are sorted in place by mdSeq at each level before
        recursing, so siblings appear in markdown order.
        """
        node.children.sort(key=lambda c: c.mdSeq)
        out = [node]
        for child in node.children:
            out += self.collect_all_descendants(child)
        return out
    def collect_all_ancestors(self, node: fnode) -> List[fnode]:
        """
        Return every ancestor of `node`, nearest parent first (root last).
        The node itself is not included.
        """
        ancestors: List[fnode] = []
        cursor = node.parent
        while cursor is not None:
            ancestors.append(cursor)
            cursor = cursor.parent
        return ancestors
    def post_change_hook(self, node: fnode, Descend:bool=True):
        """
        Regenerate summaries and embeddings after a node is added or modified.

        When Descend is True, summaries are rebuilt bottom-up for the whole
        subtree first; ancestor summaries are always refreshed afterwards
        (they aggregate child summaries). Finally all affected nodes are
        re-embedded and the session is flushed — committing is left to the
        caller.
        """
        def geneDesendSum(node: fnode):
            # Post-order recursion: children first, so a parent's summary can
            # aggregate freshly generated child summaries.
            if not node.children:  # leaf: summarise the node itself
                self.generateSummary(node)
            else:
                for child in node.children:
                    geneDesendSum(child)
                self.generateSummary(node)
        def geneAncestorSum(node: Optional[fnode]):
            # Refresh every ancestor's summary (they embed child summaries).
            if node is None:
                return
            current = node.parent
            while current:
                self.generateSummary(current) 
                current = current.parent
        if Descend:
            geneDesendSum(node)
        geneAncestorSum(node)
        # Re-embed everything whose summary may have changed.
        descendants = self.collect_all_descendants(node)
        ancestors = self.collect_all_ancestors(node)
        combined_nodes = list(descendants) + list(ancestors)
        self.embSC(combined_nodes)
        # self.session.commit()
        self.session.flush()
    def embSC(self, nodes: List[fnode]):
        """
        Batch-compute embedding vectors for the given nodes' content/summary.

        All non-empty texts are gathered into one list and pushed through
        embQuery in a single call (embQuery owns caching and deduplication);
        empty texts get an empty vector immediately. Content is only embedded
        when USE_CONTENT_VEC is enabled; summaries are always embedded.
        """
        # (node, target attribute, text) triples awaiting embedding.
        pending: List[Tuple[fnode, str, str]] = []
        for node in nodes:
            if self.USE_CONTENT_VEC:
                if node.content:
                    pending.append((node, "content_vec", node.content))
                else:
                    node.content_vec = []
            if node.summary:
                pending.append((node, "sum_vec", node.summary))
            else:
                node.sum_vec = []

        # Nothing to embed: all texts were empty.
        if not pending:
            return

        # One batched call handles cache lookups and new embeddings.
        vectors = self.embQuery([text for _, _, text in pending])

        # Assign each resulting vector back to its node attribute, in order.
        for (node, attr, _), vec in zip(pending, vectors):
            setattr(node, attr, vec)
    def shareEscalate(self, user:str,operation:str,give_user:str,setPerm:int, nodeOrId: Union[int,fnode]):
        """
        Walk from nodeOrId up to the root and make sure every ancestor grants
        `give_user` at least `setPerm`; escalate (add) where it is missing.
        Without readable ancestors the DP search could never descend to the
        shared node, so sharing must open the path from the root.

        :param user: acting user — verified for `operation` at every level
        :param operation: permission-matrix operation checked for `user`
        :param give_user: user whose ancestor permissions get escalated
        :param setPerm: permission bits each ancestor must end up holding
        """
        node=self._findNodeById(nodeOrId)
        if not node:
            raise ValueError(self.err.NODE_NOT_FOUND(f"node: {nodeOrId} not found",int(self.pNodeId(nodeOrId))))
        while node.parent:
            self.PM.pm(user,node,operation,"user")
            if not self.PM.greater(node.parent.permission.get(give_user,P.NONE),setPerm):
                # (An interactive y/n confirmation used to guard this
                # escalation; it now proceeds unconditionally with a warning.)
                print(colored(f"warning: during {user} sharing to {give_user}, escalating parent {node.parent.id} permission to {setPerm:08b}\n{self.pNode(node.parent)}\n{self.pNode(node)}",'yellow'))
                self.setPerm(node.parent,give_user,setPerm,"add")
                self.session.flush()
            node=node.parent
    def agentPermEscalate(self, user:str,checkPerm:int,setPerm:int, nodeOrId: Union[int,fnode]):
        """
        Walk from nodeOrId up to the root; on every ancestor where `user`
        holds `checkPerm`, ensure the agent-side bits `setPerm` are also
        present, escalating (add) where missing. This lets the DP search
        traverse ancestors of a node the agent was granted access to.

        Ancestors missing `checkPerm` are NOT escalated — that situation is
        only logged as a suspected permission-integrity problem, since a
        child should not hold a permission its parent lacks.
        """
        node=self._findNodeById(nodeOrId)
        if not node:
            raise ValueError(self.err.NODE_NOT_FOUND(f"node: {nodeOrId} not found",int(self.pNodeId(nodeOrId))))
        while node.parent:
            if self.PM.greater(node.parent.permission.get(user,P.NONE),checkPerm):
                if not self.PM.greater(node.parent.permission.get(user,P.NONE),setPerm):
                    # (An interactive y/n confirmation used to guard this
                    # escalation; it now proceeds unconditionally with a warning.)
                    print(colored(f"warning: auto escalating parent {node.parent.id} permission of {setPerm:08b}\n{self.pNode(node.parent)}\n{self.pNode(node)}",'yellow'))
                    self.setPerm(node.parent,user,setPerm,"add")
                    self.session.flush()
            else:
                print(colored(f"warning: parent {node.parent.id} do not have checkPerm{checkPerm:08b} permission while the origial node {nodeOrId} have it. Permission db intergrity in doubt!\n{self.pNode(node.parent)}\n{self.pNode(node)}",'yellow'))
            node=node.parent
    def _findNodeById(self, nodeOrId: Union[int,fnode]) -> Optional[fnode]:
        """
        ### const
        Resolve a node reference: pass an fnode through unchanged, otherwise
        look the id up in the database. Accepts int or numeric-string ids;
        prints a warning and returns None for -1, unparsable values, or ids
        with no matching row.
        """
        if isinstance(nodeOrId, fnode):
            return nodeOrId
        # -1 is the conventional "no node" sentinel; anything that is neither
        # an int nor a str cannot be an id at all.
        if nodeOrId == -1 or not isinstance(nodeOrId, (int, str)):
            print(colored(f"nodeOrId: {nodeOrId} is not a valid node or id",'red'))
            return None
        if isinstance(nodeOrId, str):
            try:
                nodeOrId = int(nodeOrId)
            except ValueError:
                print(colored(f"nodeOrId: {nodeOrId} is not a valid node or id",'red'))
                return None
        # Primary-key lookup through the session (uses the identity map).
        node = self.session.get(fnode, nodeOrId)
        if not node:
            print(colored(f"node: {nodeOrId} not found",'red'))
        return node
    def cos_sim(self, v1: List[float], v2: List[float]) -> float:
        """
        Cosine similarity between two vectors.

        Returns 0.0 when either vector is empty or has zero magnitude, so
        callers never see a division-by-zero.
        """
        if not v1 or not v2:
            return 0.0
        dot_product = sum(x * y for x, y in zip(v1, v2))
        sq1 = sum(x * x for x in v1)
        sq2 = sum(y * y for y in v2)
        # A zero squared norm means a zero norm: bail out before dividing.
        if sq1 == 0 or sq2 == 0:
            return 0.0
        return dot_product / (math.sqrt(sq1) * math.sqrt(sq2))
    def agentPerm(self,user, nodeOrId: Union[int,fnode],permChar:str,method:str="add") -> Dict[str,Union[str,int]]:
        """
        Set the agent's context permission mirroring one of the user's own
        permission levels on a subtree.

        - permChar: "O"/"A"/"W"/"R" — which user permission to mirror.
        - method: "add" merges the bits in, "override" replaces the existing
          value, "clear" zeroes the bits that are set in the agent permission.
        :return: an error/success dict from the error-code helper.
        """
        try:
            # Map the permission character to (required user bit, agent bit).
            perm_pairs = {
                "O": (P.OWNER, P.AGENT_OWNER),
                "A": (P.ADMIN, P.AGENT_ADMIN),
                "W": (P.WRITE, P.AGENT_WRITE),
                "R": (P.READ, P.AGENT_READ),
            }
            if permChar not in perm_pairs:
                return self.err.INVALID_PARAM(f"permChar: {permChar} is not a valid permission character")
            checkPerm, setPerm = perm_pairs[permChar]
            self.setReculPermission(user,checkPerm,user, setPerm, nodeOrId,method=method)
            if method=="add":
                # DP search needs readable ancestors: mirror READ upward.
                self.agentPermEscalate(user,P.READ,P.AGENT_READ,nodeOrId)
        except Exception as e:
            return format_error(e)
        return self.err.IF_SUCCESS_COMMIT(int(self.pNodeId(nodeOrId)))

    
    def setReculPermission(self,user: str,check_permission:int,give_user:str,give_other_permission: int, nodeOrId: Union[int,fnode], method: str = "add") -> None:
        """
        Recursively set `give_user`'s permission on a node and every
        descendant. Returns None: failure always raises, so reaching the end
        means success.

        - check_permission: bits `user` must hold on a node for it to be
          updated; nodes failing this check are silently skipped (the strict
          raise is intentionally disabled — best-effort behaviour).
        - method: "add" ORs bits in, "override" replaces the value, "clear"
          zeroes exactly the bits set in give_other_permission
          (e.g. 0111 cleared by 0010 -> 0101; 1111 cleared by 1000 -> 0111).
        - Only "space"/"doc" roots are accepted.
        """
        origNode=self._findNodeById(nodeOrId)
        if not origNode:
            raise ValueError(self.err.NODE_NOT_FOUND(f"node: {nodeOrId} not found",int(self.pNodeId(nodeOrId))))
        if origNode.node_type not in ["space","doc"]:
            raise ValueError(self.err.INVALID_PARAM(f"node: {nodeOrId} with type {origNode.node_type} is not among space or doc. Can not set permission for it."))
        # Depth-first walk over the subtree via a LIFO stack.
        pending = [origNode]
        while pending:
            curr = pending.pop()
            # Skip nodes the acting user lacks check_permission on instead of
            # raising — deliberate, so partial grants still go through.
            if self.PM.greater(curr.permission.get(user,P.NONE),check_permission):
                self.setPerm(curr,give_user,give_other_permission,method)
            pending.extend(curr.children)
    def generateSummary(self, node: fnode):
        """
        Generate a summary for `node` based on its type and content.

        - Root (id 1): no summary (and therefore no sum_vec).
        - "space": its own name plus "childName:childSummary" pairs.
        - Leaf nodes: LLM keyword extraction over the content.
        - Other internal nodes: keywords of own content plus children's
          summaries; "doc" nodes get a second keyword pass to compress.

        File-level LLM summarisation is intentionally NOT triggered here:
        when an agent edits a document it produces the new summary itself
        (with debouncing), so this low-level hook must not call it again.
        """
        geneType = ""  # records how the summary was produced (for _extra_info)
        if node.id==1:  # root node: no summary, hence no sum_vec either
            node.summary=""
        elif node.node_type == "space":
            geneType = "kwcat"
            node.summary=node.content
            if node.children:
                node.summary+=","+",".join([child.content+":"+child.summary for child in node.children])
        elif len(node.children) == 0:
            geneType = "kw"
            node.summary = ",".join(LLMService().generateSumByKeyWord(node.content))
        else:
            geneType = "kwcat"
            node.summary = ",".join(LLMService().generateSumByKeyWord(node.content)) + "," + ",".join([child.summary for child in node.children])
            if node.node_type == "doc":
                node.summary=",".join(LLMService().generateSumByKeyWord(node.summary,10))
        # Deduplicate the comma-separated parts. dict.fromkeys keeps the
        # first-occurrence order, making the summary (and its embedding)
        # deterministic — the previous set() shuffled the parts between runs
        # under hash randomization.
        unique_summary_parts = dict.fromkeys(node.summary.split(","))
        node.summary = ",".join(unique_summary_parts)
        node.summary = node.summary.replace(",,", ",")
        node.summary = node.summary.lstrip(",")
        setattr(node, '_extra_info', geneType)  # record how the summary was generated
        setattr(node, '_sim_score', [])
        return
    def getDPparam(self, node: Optional[fnode]) -> Tuple[int, float]:
        """
        Return (top_k, cos_threshold) for the dynamic-pruning search.

        top_k caps how many children per tree level survive into the
        recursive descent; cos_threshold is the minimum similarity a node
        must reach to be considered at all. When node is None the pair
        applies to the final result list: top_k scales with ~10% of the
        total node count, clamped to [10, 30].
        """
        if node is None:
            # Result-level parameters: size the return window from the
            # current number of nodes in the database.
            total_nodes = self.session.query(func.count()).select_from(fnode).scalar()
            return min(max(10, math.ceil(total_nodes * 0.1)), 30), 0.30
        # Per-type pruning table: node_type -> (top_k, cos_threshold);
        # unknown types fall back to a tight (3, 0.30).
        table: Dict[str, Tuple[int, float]] = {
            "space": (15, 0.10),  # dir
            "doc": (15, 0.20),    # file
            "h1": (15, 0.20),
            "h2": (10, 0.20),
            "h3": (5, 0.20),
            "li": (5, 0.30),
            "p": (5, 0.30),
        }
        return table.get(node.node_type, (3, 0.30))
    def ensureFullReturn(self, rootNode: fnode, user: str, ensure_full_return_type: str, check_permission: int,by:str) -> set[fnode]:
        """
        Build the set of nodes under rootNode whose type priority qualifies
        them for guaranteed inclusion in a RAG result.

        - "" disables the feature (only rootNode is returned).
        - "markdown" is an alias for "doc"; unknown types fall back to "h2"
          with a warning.
        - Nodes the user cannot read (per PM.pm with `by`) are skipped;
          qualifying nodes are tagged _extra_info="ensure".
        NOTE(review): check_permission is currently unused here — the read
        check goes through PM.pm instead; confirm whether that is intended.
        """
        qualifying = set([rootNode])
        if not ensure_full_return_type:
            return qualifying

        if ensure_full_return_type.lower() == "markdown":
            ensure_full_return_type = "doc"

        if ensure_full_return_type not in priority_map:
            print(colored(f"警告：ensure_full_return_type={ensure_full_return_type} 不在支持的类型范围内：{list(priority_map.keys())}", 'yellow'))
            ensure_full_return_type = "h2"

        ensure_priority = priority_map.get(ensure_full_return_type, 100)
        # Walk the whole subtree once; priority_map assigns lower numbers to
        # "bigger" node types, so <= keeps everything at or above the cutoff.
        for candidate in self.collect_all_descendants(rootNode):
            if priority_map.get(candidate.node_type, 100) > ensure_priority:
                continue
            try:
                self.PM.pm(user,candidate,"read",by)
            except Exception:
                continue
            setattr(candidate, '_extra_info', "ensure")
            qualifying.add(candidate)
        return qualifying

    def add_ancestor(self, rootNode: fnode, existingSet: set[fnode]) -> set[fnode]:
        """
        Close `existingSet` under the parent relation, without escaping the
        subtree rooted at rootNode.

        Newly visited ancestors are tagged _extra_info="ancestor" unless
        they are already tagged as "search"/"match" results.
        """
        closed: set[fnode] = set()
        for start in existingSet:
            cursor = start
            # Stop once we would step past rootNode (i.e. reach its parent).
            while cursor and cursor != rootNode.parent:
                closed.add(cursor)
                if not cursor.parent:
                    break
                cursor = cursor.parent
                if self.getextra(cursor) not in ["search","match"]:
                    setattr(cursor, '_extra_info', "ancestor")
        return closed
    
    @timeit("light_cyan")
    def RAG(self, user: str, query: str, ensure_full_return_type: str, method: str, by:str,style:str="cat",subrootid:int=1,summaryContent:str="0:50",surpressPrint:bool=False) -> str:
        """
        by defines the context->permission check
        - filterstype:(''/s)/a -> only return searched results or all results(including ensure,ancestor)
        - showstyle:j/t/ct-> json/tree/coloredTree
        """
        self.session.flush()
        # self.session.commit()
        check_permission = P.READ|P.AGENT_READ if by == "agent" else P.READ
        print(colored(f"RAG -query {query} -efrt {ensure_full_return_type} -method {method} -by {by} -style {style} -subrootid {subrootid}", "light_cyan"))
        # 获取当前搜索的根节点
        sub_root = self._findNodeById(subrootid) or self.root()
        # 获取确保返回的节点集合
        ensure_search_Set = self.ensureFullReturn(sub_root, user, ensure_full_return_type, check_permission,by)
        # 处理常规查询
        if query:
            searchList: list[fnode] = []
            if method in ["regex", "literal"]:
                searchList = self.simpleLiteralSearch(user, query, use_regex=(method=="regex"), by=by)
            else:  # semantic 搜索
                searchList.extend(self.simpleLiteralSearch(user, query, use_regex=False, by=by))
                # ensure_search_Set.update(searchList)
                searchList.extend(self.DPsearch(user, list(ensure_search_Set), query, by=by))
            for node in searchList:
                sim_val = self.getsim(node)
                if sim_val and max(sim_val) >= 0.99:
                    setattr(node, '_extra_info', "match")
                else:
                    setattr(node, '_extra_info', "search")
            if "a" in style:
                ensure_search_Set.update(searchList)
            else:
                ensure_search_Set = set(searchList)
        if "a" in style:
            ensure_search_Set = self.add_ancestor(sub_root, ensure_search_Set)
        else:
            ensure_search_Set = set([node for node in ensure_search_Set if node.id!=1])
        nodeList = self.treeListPre(ensure_search_Set, node=sub_root)  # 从子根开始遍历
        nodeStr=self.pNodeList(nodeList,summaryContent,style)#type: ignore
        if not surpressPrint:
            print(nodeStr)
            sys.stdout.flush()
        for node in nodeList:
            setattr(node, '_sim_score', [])
            setattr(node, '_extra_info', "")
        return nodeStr
    def interact(self):
        """
        Simple console REPL for manually exercising RAG. Loops forever; each
        prompt falls back to a default when left blank.
        """
        while True:
            subrootid = input("请输入搜索的根节点id(默认为真正的根节点):") or 1
            query_text = input("请输入搜索内容：")
            ensure_type = input("请输入确保返回的类型：") or ""
            search_method = input("请输入搜索方法(semantic*,regex,literal):") or "semantic"
            display_style = input("请输入style(j/t/ct+s/a):") or "cat"
            self.RAG("user1", query_text, ensure_type, search_method, "user", display_style, subrootid)
    def shareNode(self, user, nodeOrId:Union[int, fnode], permChar:str,share_user:str) -> Dict[str, Union[str, int]]: 
        """
        Share a node with another user.
        - nodeOrId: node object or node id to share (root itself refused)
        - share_user: id of the receiving user; must already exist, i.e.
          appear on the root node's permission dict
        - permChar: "R" or "W" or "A" or "O"
        :return: an error/success dict from the error-code helper.
        """
        # A user "exists" iff the root node carries a permission entry for them.
        if self.root().permission.get(share_user,-1)==-1:
            return self.err.INVALID_PARAM(f"sharing to a non-existent user: {share_user}")
        if nodeOrId==-1:
            return self.err.INVALID_PARAM(f"can not share root node {nodeOrId}")
        node=self._findNodeById(nodeOrId)
        if not node:
            return self.err.INVALID_PARAM(f"node: {nodeOrId} not found")
        try:
            self.PM.pm(user,node,"share","user")
        except Exception as e:
            # The sharer must hold "share" on the node itself; space-level
            # admin/owner rights are not consulted here.
            return format_error(e)
        permission=P.NONE
        if permChar=="O":
            permission=P.OWNER
        elif permChar=="A":
            permission=P.ADMIN
        elif permChar=="W":
            permission=P.WRITE
        elif permChar=="R":
            permission=P.READ
        else:
            return self.err.INVALID_PARAM(f"permChar {permChar} 不在支持范围内:R or W or A or O")
        try:
            # Docs share recursively (the whole markdown subtree); spaces
            # only share the space node itself.
            if node.node_type=="doc":
                self.setReculPermission(user,P.NONE,share_user,permission,node)
            elif node.node_type=="space":
                self.setPerm(node,share_user,permission,"add")
        except Exception as e:
            return format_error(e)
        if permChar=="O":
            self.setPerm(node,user,P.OWNER_ONLY,"clear")#a node can only be owned by one user
        self.shareEscalate(user,"share",share_user,P.READ,node)#question, what permission should be auto escalated to the share_user?
        return self.err.IF_SUCCESS_COMMIT(1)
if __name__ == "__main__":
    # 创建一个树实例
    tree = ftree()
    tree.addUser("user1")
    print(tree.root().permission)
    tree.agentPerm("user1",tree.root(),"R","add")
    if tree.session.query(fnode).count() <= 2:
        adict=tree.addNode("user1",tree.root(),"user1dir","space",by="user")
        testdir_id=int(adict.get("success",-1))
        f1_id=int(tree.buildMdTree_BE("user1",testdir_id, r"D:\CYBERLIFE_\NoteAgentTree3\test\openai.md", by="user")["success"])
    tree.interact()