"""
方案一：
A (读取pdf, 输入：文件地址 file_path): 获取pdf中各种文字、图片 输出 dict
B (识别pdf图片语义, 输入 pdf-dict): 为图片打标  输出 pdf-dict | 图片-dict(k-v)

C (将打标图片、pdf文本 (dict)、prompt (str)作为输入): 输出 按照模板和要求总结的产品推荐数据结构对象 AIMessage

D (将C中内容作为输入 AIMessage): 将数据结构对象填入模板，导出对应文档
"""
import argparse
import json
import os

from typing_extensions import TypedDict

from langchain_core.messages import BaseMessage
from langgraph.graph import StateGraph

from app.project.doc_to_recommendation.utils.config_loader import load_config, get_config_path
from app.project.doc_to_recommendation.llm.llm_task import init_model
from app.project.doc_to_recommendation.llm.model.base_model import BaseModel

from app.project.doc_to_recommendation.prdt_doc_parse import parse_ocr_layout_pdf
from app.project.doc_to_recommendation.utils.generate_utils import genertate_output_md


# Default product-recommendation template (JSON) resolved from the package config dir.
TEMPLATE_DEFAULT = get_config_path("prdt_template.json")
# Default OCR/layout pipeline configuration (YAML).
CONFIG_PATH_DEFAULT = get_config_path("tongyi_ocr_layout.yaml")

# State definition
class PDState(TypedDict):
    """Shared state dict passed between the LangGraph pipeline nodes."""
    file_path: str # source file path
    file_content: dict # parsed file content
    config_path: str # configuration file path
    config: dict # configuration parsed into a dict
    model_img: BaseModel # model used for image comprehension
    model_gen: BaseModel # model used for text generation
    ai_result: BaseMessage # generated recommendation-handbook content (data structure)
    save_dir: str # directory where outputs are saved
    template_path: str # template file path
    text_prompt: str # prompt for the text generation task
    img_prompt: str # prompt for the image semantics task
    template: list[str] # section titles from the template (gen_template returns list(section_details.keys()))
    figures_fin: list[str] # per-figure results returned by the image step
    result_file: str # absolute path of the result file
    ex_config: dict # external config overrides merged into config at init

# agent_node definition

def agent_node(state: PDState) -> PDState:
    """
    Combine the parsed PDF text with the prepared figure descriptions and
    ask the generation model for the product-recommendation content.

    Args:
        state (PDState): Pipeline state. Reads:
            - 'file_content': dict with 'contents', a list of text-section
              lists that are flattened and newline-joined here;
            - 'figures_fin': figure descriptions already produced by the
              img_comprehension_node / img_label_node step;
            - 'model_gen': text generation model;
            - 'text_prompt': prompt for the generation task.

    Returns:
        PDState: The same state with the model response stored under 'ai_result'.
    """
    # Flatten the per-section text lists into one newline-joined string.
    content = '\n'.join([element for sublist in state['file_content']['contents'] for element in sublist])
    # Both materials go to the model as a single JSON payload (Chinese keys are part of the protocol).
    message = json.dumps({'文本材料': content, '图片材料': state['figures_fin']}, ensure_ascii=False)

    print("==============model_gen request===============", message)  # debug trace of the outgoing request
    state['ai_result'] = state['model_gen'].agent_calls(text=message, image=None, prompt=state['text_prompt'])
    return state


def img_comprehension_node(state: PDState) -> PDState:
    """
    Ask the image model for the semantic meaning of each figure and store
    the usable results in ``figures_fin``.

    NOTE(review): the original loop body was commented out, which made this
    node always produce an empty ``figures_fin`` and rendered the
    ``img_comprehension`` routing branch a no-op. The loop is restored here;
    confirm it was not disabled deliberately (e.g. for cost reasons).

    Args:
        state (PDState): Pipeline state. Reads 'file_content' ('figures'),
            'model_img' and 'img_prompt'.

    Returns:
        PDState: The same state with 'figures_fin' set to the model results
        whose 'type' is 'img'.
    """
    figures = state['file_content']['figures']
    figures_fin = []
    for figure in figures:
        img_result = state['model_img'].agent_calls(text="", image=figure, prompt=state['img_prompt'])
        # Keep only results the model classified as genuine image content.
        if img_result['type'] == 'img':
            figures_fin.append(img_result)
    state['figures_fin'] = figures_fin
    return state

def img_label_node(state: PDState) -> PDState:
    """
    Skip the LLM image-comprehension step: copy each figure's embedded
    text label directly into ``figures_fin``.

    Args:
        state (PDState): Pipeline state; reads 'file_content' ('figures').

    Returns:
        PDState: The same state with 'figures_fin' set to the label list.
    """
    parsed_figures = state['file_content']['figures']
    state['figures_fin'] = [figure['label'] for figure in parsed_figures]
    return state


# parse_pdf_node definition

def parse_pdf(state: PDState) -> PDState:
    """
    Run OCR/layout parsing on the source PDF and store the result under
    'file_content'.
    """
    parsed = parse_ocr_layout_pdf(state['file_path'], state['save_dir'], state['config'])
    state['file_content'] = parsed
    return state


# generation_node definition

def generation_doc(state: PDState) -> PDState:
    """
    Write the AI-generated content to a document on disk.

    Args:
        state (PDState): Pipeline state. Reads 'ai_result', 'file_path'
            (only for the output file name) and 'save_dir'.

    Returns:
        PDState: The same state with 'result_file' set to the path of the
        generated file.
    """
    content = state['ai_result']
    file_name = os.path.splitext(os.path.basename(state['file_path']))[0]
    # exist_ok avoids the check-then-create race and a crash if the dir exists.
    os.makedirs(state['save_dir'], exist_ok=True)
    result_file_path = os.path.join(state['save_dir'], f"{file_name}.txt")
    state['result_file'] = genertate_output_md(result_file_path, content)
    return state


def init_node(state: PDState) -> PDState:
    """
    Load the run configuration, merge optional external overrides, and
    build the prompts, template and LLM models used by later nodes.

    Args:
        state (PDState): Pipeline state. Reads 'config_path' and the
            optional 'ex_config' override dict.

    Returns:
        PDState: The same state with 'config', 'text_prompt', 'img_prompt',
        'template', 'model_gen' and 'model_img' populated.
    """
    # Read the config file
    config_path = state['config_path']
    state['config'] = config = load_config(config_path)
    # .get: 'ex_config' is not guaranteed to be present in the invoke input
    # (the original state['ex_config'] raised KeyError when omitted).
    ex_config = state.get('ex_config')
    if ex_config:
        # External overrides take precedence over the file-based config.
        config.update(ex_config)
    llm_model_gen = config['llm_node']['gen']
    llm_model_img = config['llm_node']['img']
    # Derive prompts and the section-title template from the template file.
    state['text_prompt'], state['img_prompt'], state['template'] = gen_template(config.get('template_path', TEMPLATE_DEFAULT))
    state['model_gen'] = init_model(llm_model_gen['name'], config=llm_model_gen)
    state['model_img'] = init_model(llm_model_img['name'], config=llm_model_img)

    return state

def router_switch(state: PDState) -> str:
    """
    Pick the figure-processing branch from the 'img_process' config.

    Returns:
        str: "img_comprehension_node" when 'img_comprehension' is enabled
        (images are sent to the LLM), otherwise "img_label_node".
    """
    img_process_conf = state['config']['img_process']
    use_llm = img_process_conf.get("img_comprehension", False)
    return "img_comprehension_node" if use_llm else "img_label_node"

# Graph definition: init -> parse_pdf -> (image branch) -> agent -> generation
workflow = StateGraph(PDState)
workflow.add_node("init_node", init_node)
workflow.add_node("parse_pdf_node", parse_pdf)
workflow.add_node("agent_node", agent_node)
workflow.add_node("generation_node", generation_doc)
workflow.add_node("img_comprehension_node", img_comprehension_node)
workflow.add_node("img_label_node", img_label_node)
workflow.set_entry_point("init_node")
workflow.add_edge("init_node", "parse_pdf_node")
# router_switch chooses LLM image comprehension vs. copying embedded labels.
workflow.add_conditional_edges("parse_pdf_node", router_switch, {
    "img_comprehension_node": "img_comprehension_node",
    "img_label_node": "img_label_node"
})
# Both image branches converge on the generation agent.
workflow.add_edge("img_comprehension_node", "agent_node")
workflow.add_edge("img_label_node", "agent_node")
workflow.add_edge("agent_node", "generation_node")
workflow.set_finish_point("generation_node")


def gen_template(template_path):
    """
    Build the text prompt, image prompt and section-title list from a JSON
    template file.

    Args:
        template_path (str): Path to the JSON template file.

    Returns:
        tuple[str, str, list[str]]: (text generation prompt, image
        comprehension prompt, ordered list of section titles).
    """
    template_conf = load_config(template_path, mode="json")
    section_details = template_conf['section_settings']['section_details']
    # Start from the global instructions, then append the per-section rules.
    # (list(...) replaces the original identity comprehension.)
    parts = list(template_conf['global_settings']['text_prompt'])
    parts.append("章节说明：")
    parts.append(template_conf['section_settings']['prefix'])
    for title, details in section_details.items():
        parts.append(title + "：")
        parts.append("说明：")
        parts.extend(details['content_requirements'])
        parts.append("该部分字数限制：" + str(details['word_limit']))
    parts.append(template_conf['section_settings']['suffix'])
    text_prompt = '\n'.join(parts)
    img_prompt = '\n'.join(template_conf['global_settings']['img_prompt'])
    return text_prompt, img_prompt, list(section_details.keys())

def process(input_path: str, save_dir: str, config_path: str | None, ex_config: dict | None = None):
    """
    Run the full PDF -> recommendation-document pipeline.

    Args:
        input_path: Path of the input file to process.
        save_dir: Directory where output files are written.
        config_path: YAML config path; ``None`` selects the default config.
        ex_config: Optional overrides merged into the loaded config.

    Returns:
        The path of the generated result file.
    """
    if config_path is None:
        config_path = CONFIG_PATH_DEFAULT
    if ex_config is None:
        ex_config = {}
    initial_state = {
        'file_path': input_path,
        'save_dir': save_dir,
        'config_path': config_path,
        'ex_config': ex_config,
    }
    graph = workflow.compile()
    final_state = graph.invoke(initial_state)
    return final_state['result_file']

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Run a task with a given configuration file.")
    parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input file.')
    parser.add_argument('-o', '--output', type=str, required=True, help='Dir to save output file.')
    parser.add_argument('-c', '--config', type=str, required=False, help='Dir to config file.')
    args = parser.parse_args()
    # Attribute access is the idiomatic way to read argparse results
    # (replaces args.__dict__['...'] indexing).
    process(args.input, args.output, args.config)