# [Language Agent Tree Search](https://arxiv.org/abs/2310.04406) (LATS), by Zhou, et al., is a general LLM agent search algorithm that combines reflection/evaluation and search (specifically Monte Carlo tree search) to achieve better overall task performance compared to similar techniques like ReACT, Reflexion, or Tree of Thoughts.

from ApiTools import apiBase,apiTools
import math
import json
from collections import deque
from typing import Optional
from langchain_core.messages import  HumanMessage
from pydantic import BaseModel, Field
import copy
from ApiVector import VectInfo
from dotenv import load_dotenv
load_dotenv()


# Name of the expert's document/vector collection (CLI arg 1, default "ball").
expert_name = apiBase.argv(1,"ball")
# System prompt describing the prediction task (CLI arg 2).
sys_prompt = apiBase.argv(2,'''You are a data prediction expert.Data prediction rules:
1. The data is divided into two parts: red ball and blue ball.
2. The red ball number is selected from 6 of 1-33, and the array words cannot be repeated;
3. The blue ball number is selected from 1 to 16, and the array words cannot be repeated;''')
# Vector-store retrieval config: "collects" is the answer collection, "score"
# the scoring collection. NOTE(review): topn/start/threshold are presumably
# retrieval parameters — confirm against ApiVector.VectInfo.
expert_info = {"collects":[{"name":expert_name,"vctname":"VCT_ADJUST","topn":6,"start":0,"threshold":0.3}],
"score":{"name":expert_name+"-score","vctname":"VCT_ADJUST","topn":6,"start":0,"threshold":0.3}
}
# Task list (CLI arg 3, JSON). Each sub-task is scored via the LLM; the search
# continues until every task in the list is completed successfully.
task_list = apiBase.argv_json(3,[ {"q":'2024-12-08;2024141', "a":"","score":0}])

# Default verification hook: always reports failure, so scoring falls back to
# the LLM-based relevance score in get_reflection.
py_code='''def tools(param):
    return {"code":400,"message":"error"} 
'''
verify_tools = apiBase.str_func(py_code)
llm = apiTools.llm
vectdb = apiTools.load_vec()

class Reflection(BaseModel):
    """LLM critique of a candidate response plus a 0-10 quality score."""

    reflections: str = Field(
        description="The critique and reflections on the sufficiency, superfluency,"
        " and general quality of the response"
    )
    # BUGFIX: pydantic spells numeric bounds ge/le; the original gte=/lte=
    # keywords are not constraint arguments, so the 0-10 bound was never
    # actually enforced.
    score: int = Field(
        description="Score from 0-10 on the quality of the candidate response.",
        ge=0,
        le=10,
    )
    found_solution: bool = Field(
        description="Whether the response has fully solved the question or task."
    )

    def as_message(self):
        """Render the reflection as a HumanMessage for a chat transcript."""
        return HumanMessage(
            content=f"Reasoning: {self.reflections}\nScore: {self.score}"
        )

    @property
    def normalized_score(self) -> float:
        """Score rescaled to [0.0, 1.0], used as the backpropagation reward."""
        return self.score / 10.0

class Node:
    """One node in the LATS search tree.

    Holds a candidate trajectory (``messages``), its LLM reflection/score, and
    the MCTS bookkeeping (running-average value and visit count). Constructing
    a node immediately backpropagates its reflection score up to the root.
    """

    def __init__(
        self,
        messages: list,
        reflection: Reflection,
        parent: Optional["Node"] = None,
    ):
        self.messages = messages
        self.parent = parent
        self.children = []
        # Running average reward of this subtree; maintained by backpropagate().
        self.value = 0
        self.visits = 0
        self.reflection = reflection
        # Root nodes start at depth 1, not 0.
        self.depth = parent.depth + 1 if parent is not None else 1
        # NOTE(review): the conditional tolerates reflection=None, but the
        # backpropagate call below would then fail on .normalized_score —
        # confirm callers always pass a real Reflection.
        self._is_solved = reflection.found_solution if reflection else False
        if self._is_solved:
            self._mark_tree_as_solved()
        self.backpropagate(reflection.normalized_score)

    def __repr__(self) -> str:
        return (
            f"<Node value={self.value}, visits={self.visits},"
            f" solution={self.messages} reflection={self.reflection}/>"
        )

    @property
    def is_solved(self) -> bool:
        """If any solutions exist, we can end the search."""
        return self._is_solved

    @property
    def is_terminal(self) -> bool:
        """True for any leaf (no children), including an unexpanded root."""
        return not self.children

    @property
    def best_child_score(self):
        """Return the child with the highest value.

        Note: returns the child *node* (not a score); unsolved children are
        keyed as 0 by the int(is_solved) factor.
        """
        if not self.children:
            return None
        return max(self.children, key=lambda child: int(child.is_solved) * child.value)

    @property
    def height(self) -> int:
        """Check for how far we've rolled out the tree (leaf height is 1)."""
        if self.children:
            return 1 + max([child.height for child in self.children])
        return 1

    def upper_confidence_bound(self, exploration_weight=1.0):
        """Return the UCT score. This helps balance exploration vs. exploitation of a branch."""
        if self.parent is None:
            raise ValueError("Cannot obtain UCT from root node")
        if self.visits == 0:
            return self.value
        # Encourages exploitation of high-value trajectories
        average_reward = self.value / self.visits
        # Encourages exploration of less-visited trajectories
        exploration_term = math.sqrt(math.log(self.parent.visits) / self.visits)
        return average_reward + exploration_weight * exploration_term

    def backpropagate(self, reward: float):
        """Update the score of this node and its parents.

        Each ancestor's value is kept as the running average of all rewards
        seen in its subtree; visits is the sample count for that average.
        """
        node = self
        while node:
            node.visits += 1
            node.value = (node.value * (node.visits - 1) + reward) / node.visits
            node = node.parent

    def get_messages(self, include_reflections: bool = True):
        """Return this node's messages, optionally followed by its reflection."""
        if include_reflections:
            return self.messages + [self.reflection.as_message()]
        return self.messages

    def get_trajectory(self) -> list:
        """Get messages representing this search branch."""
        return self.messages

    def _get_all_children(self):
        """Collect every descendant of this node via breadth-first traversal."""
        all_nodes = []
        nodes = deque()
        nodes.append(self)
        while nodes:
            node = nodes.popleft()
            all_nodes.extend(node.children)
            for n in node.children:
                nodes.append(n)
        return all_nodes

    def get_best_solution(self):
        """Return the best solution from within the current sub-tree."""
        all_nodes = [self] + self._get_all_children()
        best_node = max(
            all_nodes,
            # We filter out all non-terminal, non-solution trajectories
            key=lambda node: int(node.is_terminal and node.is_solved) * node.value,
        )
        return best_node

    def _mark_tree_as_solved(self):
        """Flag every ancestor as solved so the search loop can stop early."""
        parent = self.parent
        while parent:
            parent._is_solved = True
            parent = parent.parent

from typing_extensions import TypedDict
class TreeState(TypedDict):
    """Shared LangGraph state threaded through the search nodes."""
    # The full tree
    root: Node
    # The original input
    input: str

def get_reflection(tasks, cand_expert):
    """Score the task list against the expert's vector collections.

    Each task earns a full point when already solved (score == 1) or when
    vectdb.fixQA verifies its answer; otherwise the LLM produces a 0-1
    relevance score from the scoring collection. found_solution is True only
    when every task scored a full point.
    """
    # Randomize the retrieval window so repeated rollouts see different chunks.
    vectdb.random_topn(cand_expert['collects'])
    reflection = Reflection(reflections="", score=0, found_solution=False)
    for task in tasks:
        # Already verified in a previous round: full credit, skip the LLM calls.
        if task['score'] == 1:
            reflection.score += 1
            continue
        # Ask the LLM and verify the answer with verify_tools.
        # NOTE(review): this rebinds the loop variable; the dict inside `tasks`
        # is only updated if fixQA mutates it in place — confirm.
        task = vectdb.fixQA(cand_expert['collects'], sys_prompt, task['q'], verify_tools)
        if task['score'] == 1:
            reflection.score += 1
            continue
        # Verification failed: fall back to an LLM-graded 0-1 relevance score.
        score_prompt="The critique and the Score from 0-1 on the quality of the candidate response.The output should only the SCORE without any explanation."
        task['score'] = vectdb.askQA([cand_expert['score']], score_prompt, task['a'])
        try:
            reflection.score += float(task['score'])
        except (TypeError, ValueError):
            # The LLM sometimes returns non-numeric text; count it as 0.
            pass
    # Solved only when every task contributed a full point.
    reflection.found_solution = reflection.score == len(tasks)
    return reflection

# ### Initial Response
# 
# We start with a single root node, generated by this first step. It responds to the user input either with a tool invocation or a response.

# %%
from langchain_core.prompt_values import ChatPromptValue
from langchain_core.runnables import RunnableConfig

# %%
# Define the node we will add to the graph
def generate_initial_response(state: TreeState) -> dict:
    """Generate the initial candidate response and install it as the tree root.

    NOTE(review): the original comment said the task list should be copied
    before scoring, but no copy was made — the module-level task_list is used
    directly, matching the original behavior.
    """
    initial_reflection = get_reflection(task_list, expert_info)
    root = Node(task_list, reflection=initial_reflection)
    return {
        **state,
        "root": root,
    }
    
# %%

def select(root: "Node") -> "Node":
    """Descend from the root, at each level following the child with the
    highest UCT score, until a leaf is reached.

    Returns the selected leaf Node (the original annotation said dict, which
    was wrong). A childless root is returned as-is by the loop itself, so no
    separate pre-check is needed.
    """
    node = root
    while node.children:
        node = max(node.children, key=lambda child: child.upper_confidence_bound())
    return node

def expand(state: TreeState, config: RunnableConfig) -> dict:
    """Grow the tree: pick the best (highest-UCT) leaf and attach 3 candidates.

    Each candidate re-scores a deep copy of the selected trajectory. Retrieval
    re-ranking matters here: the top-K results are not necessarily ordered by
    relevance — the most relevant chunk may be the 5th or 7th rather than the
    1st or 2nd — so repeated rollouts can surface different chunks.
    """
    tree_root = state["root"]
    target: Node = select(tree_root)
    trajectory = target.get_trajectory()

    def _candidate() -> Node:
        # Deep-copy so each rollout scores its own task list independently.
        tasks_copy = copy.deepcopy(trajectory)
        scored = get_reflection(tasks_copy, expert_info)
        return Node(tasks_copy, parent=target, reflection=scored)

    target.children.extend([_candidate() for _ in range(3)])
    # The tree was mutated in place, so the state is returned unchanged.
    return state

# ## Create Graph
# 
# With those two nodes defined, we are ready to define the graph. After each agent step, we have the option of finishing.

# %%
from typing import Literal
from langgraph.graph import END, StateGraph, START
def should_loop(state: TreeState) -> Literal["expand", "__end__"]:
    """Route the graph: stop once the tree is solved or rolled out 10 deep,
    otherwise keep expanding."""
    tree_root = state["root"]
    if tree_root.is_solved or tree_root.height > 10:
        return END
    return "expand"

# Assemble the LangGraph state machine: "start" creates the root node, then
# should_loop routes repeatedly through "expand" until the tree is solved or
# the rollout depth limit is hit.
builder = StateGraph(TreeState)
builder.add_node("start", generate_initial_response)
builder.add_node("expand", expand)
builder.add_edge(START, "start")
builder.add_conditional_edges(
    "start",
    # Either expand/rollout or finish
    should_loop,
)
builder.add_conditional_edges(
    "expand",
    # Either continue to rollout or finish
    should_loop,
)

graph = builder.compile()
# %%
#question = "Generate a table with the average size and weight, as well as the oldest recorded instance for each of the top 5 most common birds."
# Run the search to completion, keeping only the final step of the stream.
# (The per-step unpack/print of the original was dead code and is removed.)
last_step = None
for step in graph.stream({"input": "start"}):
    last_step = step

# %%
# The final TreeState is keyed by whichever graph node executed last.
if last_step is None:
    raise RuntimeError("graph.stream() yielded no steps")
final_key = "expand" if "expand" in last_step else "start"
solution_node = last_step[final_key]["root"].get_best_solution()
best_trajectory = solution_node.get_trajectory()
ret = json.dumps(best_trajectory, ensure_ascii=False)
print(ret)

