import json
from typing import List

from tree_rag.dataclasses.api import Knowledge
from tree_rag.dataclasses.knowledge import ALL_NODES_TYPE, Graph

from . import DEFAULT_MODEL_NAME, client

# System prompt template for the extraction agent.
# Placeholders filled by build_system_prompt():
#   {role_prompt} — persona preamble placed at the top;
#   {context}    — the label catalogue (level-1 / level-2 / scene sections).
# Literal braces in the JSON example are doubled ({{ }}) so str.format
# leaves them intact.
# Fixes vs. previous revision: restored the truncated "你" in 任务描述, and
# removed the trailing comma after "category" — the example must be valid
# JSON or the model may imitate the invalid form.
SYSTEM_PROMPT = """{role_prompt}
任务描述：
你的任务是给定市民的输入，抽取关键信息。
以下是一些知识体系标签供你参考，抽取的内容必须从这些标签中选择。如果你无法从这些标签中找到答案，请将相应字段置空。抽取结果以json格式返回。
不确定的不要抽，宁缺毋滥

格式示例：
```json
{{
    "level_1": [],
    "level_2": [],
    "scene": [
        {{
            "label": "",
            "category": ""
        }}
    ]
}}
```

## 参考关键词
{context}
"""


def build_system_prompt(all_nodes: ALL_NODES_TYPE, role_prompt: str) -> str:
    """Render the extraction system prompt from the label catalogue.

    Args:
        all_nodes: Catalogue container exposing ``all_level_1`` and
            ``all_level_2`` (label lists) and ``all_scene`` (mapping of
            category -> list of scene labels).
        role_prompt: Persona preamble inserted at the top of the prompt.

    Returns:
        The fully formatted system prompt string.
    """
    # One "### <category>" section per scene category, labels one per line.
    scene_sections = [
        f"### {category}\n" + "\n".join(labels) + "\n"
        for category, labels in all_nodes.all_scene.items()
    ]
    # Built with plain joins rather than '\n'.join inside f-string braces:
    # a backslash in an f-string expression is a SyntaxError before
    # Python 3.12 (PEP 701).
    context_str = (
        "## 一级事项\n"
        + "\n".join(all_nodes.all_level_1)
        + "\n\n## 二级事项\n"
        + "\n".join(all_nodes.all_level_2)
        + "\n\n## 场景引导\n"
        + "".join(scene_sections)
        + "\n"
    )
    return SYSTEM_PROMPT.format(context=context_str, role_prompt=role_prompt)


# Fixed few-shot example(s) always prepended to the conversation in
# run_agent(): a bare greeting ("你好") paired with an all-empty Graph,
# teaching the model to return an empty extraction when no label applies
# ("宁缺毋滥" per the system prompt).
# NOTE(review): the id fields ("xxx", "4xx") look like placeholders — from
# this file only .question and .graph are ever read; confirm nothing
# downstream depends on real ids here.
fix_examples = [
    Knowledge(
        dataset_id="xxx",
        segment_id="4xx",
        tenant_id="xxx",
        channel_id="xxx",
        question="你好",
        answer="",
        graph=Graph(
            level_1=[],
            level_2=[],
            scene=[],
            keywords=[],
        ),
    ),
]


def validate_graph(graph: Graph, all_nodes: ALL_NODES_TYPE) -> Graph:
    """Filter an LLM-produced graph down to labels known in ``all_nodes``.

    Any ``level_1``/``level_2`` value absent from the catalogue is dropped,
    and a scene entry is kept only when its category exists and its label
    belongs to that category. ``keywords`` pass through unchanged.

    Args:
        graph: The (possibly hallucinated) graph parsed from the LLM reply.
        all_nodes: The known label catalogue to validate against.

    Returns:
        A new Graph containing only catalogue-backed labels.
    """
    # Hoist membership sets once: O(1) lookups instead of scanning the
    # label lists for every extracted item.
    known_level_1 = set(all_nodes.all_level_1)
    known_level_2 = set(all_nodes.all_level_2)

    validated_scene = [
        item
        for item in graph.scene
        # .get with an empty default folds the two-step category/label
        # check into one membership test; unknown categories yield ().
        if item.label in all_nodes.all_scene.get(item.category, ())
    ]

    return Graph(
        level_1=[item for item in graph.level_1 if item in known_level_1],
        level_2=[item for item in graph.level_2 if item in known_level_2],
        scene=validated_scene,
        keywords=graph.keywords,
    )


def build_examples(all_nodes: ALL_NODES_TYPE) -> List[Knowledge]:
    """Build one synthetic few-shot example per available label tier.

    For each non-empty tier (level-1, level-2, scene) the first catalogue
    label is used as both the question and the expected extraction, giving
    the model a positive case per tier.

    Args:
        all_nodes: The known label catalogue.

    Returns:
        Up to three Knowledge examples; empty when no level-1 labels exist.
    """

    def _example(question: str, graph: Graph) -> Knowledge:
        # Shared boilerplate — the id fields are placeholders; only
        # .question and .graph are consumed when building messages.
        return Knowledge(
            dataset_id="xxx",
            segment_id="4xx",
            tenant_id="xxx",
            channel_id="xxx",
            question=question,
            answer="",
            graph=graph,
        )

    # Guard the unconditional all_level_1[0] of the previous revision,
    # which raised IndexError on an empty catalogue.
    if not all_nodes.all_level_1:
        return []

    level_1 = all_nodes.all_level_1[0]
    examples = [
        _example(
            level_1,
            Graph(level_1=[level_1], level_2=[], scene=[], keywords=[]),
        )
    ]

    if all_nodes.all_level_2:
        level_2 = all_nodes.all_level_2[0]
        examples.append(
            _example(
                level_2,
                Graph(level_1=[], level_2=[level_2], scene=[], keywords=[]),
            )
        )

    if all_nodes.all_scene_category:
        category = all_nodes.all_scene_category[0]
        scene = all_nodes.all_scene[category][0]
        examples.append(
            _example(
                scene,
                Graph(
                    level_1=[],
                    level_2=[],
                    # Kept as a plain dict to match existing behavior —
                    # presumably coerced into the scene item model by
                    # Graph's validation; TODO confirm.
                    scene=[{"label": scene, "category": category}],
                    keywords=[],
                ),
            )
        )

    return examples


async def run_agent(
    query: str, all_nodes: ALL_NODES_TYPE, examples: List[Knowledge], role_prompt: str
) -> Graph:
    """Extract a validated label Graph from a citizen query via the LLM.

    Args:
        query: The citizen's free-text input.
        all_nodes: Known label catalogue used for both prompting and
            post-hoc validation of the LLM output.
        examples: Few-shot examples; when empty, synthetic ones are
            generated from ``all_nodes``.
        role_prompt: Persona preamble for the system prompt.

    Returns:
        A Graph whose labels are guaranteed to exist in ``all_nodes``
        (empty when the catalogue has no level-1 labels).
    """
    # Empty catalogue: nothing can ever be extracted, skip the LLM call.
    if not all_nodes.all_level_1:
        return Graph(level_1=[], level_2=[], scene=[], keywords=[])

    system_prompt = build_system_prompt(all_nodes, role_prompt)
    messages = [{"role": "system", "content": system_prompt}]

    # Always lead with the fixed examples; fall back to generated ones
    # when the caller supplied none.
    few_shot = fix_examples + (examples if examples else build_examples(all_nodes))
    for example in few_shot:
        messages.append({"role": "user", "content": example.question})
        # TODO: remove keywords here — they are not part of the output
        # format shown in SYSTEM_PROMPT.
        graph_dict = example.graph.model_dump()
        # Default of None tolerates dumps that lack the key (the bare
        # pop raised KeyError in that case).
        graph_dict.pop("keywords", None)
        messages.append(
            {
                "role": "assistant",
                "content": json.dumps(graph_dict, indent=4, ensure_ascii=False),
            }
        )

    messages.append({"role": "user", "content": f"用户输入：{query}"})

    chat_completion = await client.chat.completions.create(
        messages=messages,
        model=DEFAULT_MODEL_NAME,
        temperature=0,  # deterministic extraction
    )
    response_str = chat_completion.choices[0].message.content

    graph = Graph.from_llm_response(response_str)
    return validate_graph(graph, all_nodes)
