
from app.utils.logger_util import user_logger
import json
import datetime
from typing import Dict, Any, List, Optional
from langchain_core.messages import AIMessage
from langgraph.constants import END
from langgraph.graph import add_messages

from app.models import SessionState, CustomEncoder
import os
import glob

# JSON template for one structured user-log entry. Callers run
# json.loads(message_str) on each use to get a fresh, independently
# mutable dict with this shape:
#   type          -- event category label (e.g. node / user / end)
#   message.desc  -- short human-readable description of the event
#   message.data  -- payload: plain text or a JSON-encoded string
message_str='''{
	"type": "",
	"message": 
		{
			"desc": "",
			"data": ""	
		}
    }'''
def generate_report_node(state: SessionState, config: dict) -> SessionState:
    """Final graph node: generate and persist the data-requirement report.

    Builds the report from the accumulated session state, saves it as
    Markdown (plus a best-effort PDF with the same base name), writes
    progress entries to the per-thread user log, appends a closing
    AIMessage, and routes the graph to END.

    Args:
        state: Current session state with all analysis results.
        config: Runtime config; config["configurable"]["thread_id"] is used
            for logging and as the report file name.

    Returns:
        SessionState: Updated state with the closing message and next=END.
    """
    new_state = state.copy()

    thread_id = config.get("configurable", {}).get("thread_id")

    # Fresh log-entry dict parsed from the module-level JSON template.
    message_dict = json.loads(message_str)
    message_dict["type"] = "节点"
    message_dict["message"]["desc"] = "需求方案交付"
    message_dict["message"]["data"] = "*方案数据结构：" + str(state)

    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

    # Echo the assistant's notice when the max-attempts limit was reached.
    # Guarded: messages may be missing or empty at this point.
    messages = new_state.get("messages") or []
    if messages:
        maxmessage = messages[-1].content
        if "最大次数" in maxmessage:
            print(f"助手: {maxmessage}")

    # Generate the report content and persist it.
    try:
        report_content = generate_data_requirement_report(new_state, config)

        # File name is derived from thread_id inside save_report_to_file.
        report_filename = save_report_to_file(report_content, config)

        # Pick the closing remark according to how the session ended.
        # .get with default so a missing counter cannot abort into the
        # failure branch after the report was already written.
        count = new_state.get('user_loop_count', 0)
        if not new_state.get('field_availability'):
            cont = "由于网络问题或者系统繁忙，未能获取到字段分析结果，现在为您生成当前的最终报告。"
        elif count > 3:  # _MAX_ATTEMPTS = 3: max number of field-revision rounds
            cont = "已达到最大沟通次数3次，现在为您生成最终报告。"
        else:
            cont = "已完成字段级别的需求分析，现在为您生成最终报告。"

        message_dict["type"] = "用户"
        message_dict["message"]["desc"] = "结束"

        # Log both the md and pdf save locations.
        data_to_dump = {
            'data_struct': f"{cont}报告已保存至: {report_filename}、{thread_id}.pdf"
        }
        message_dict["message"]["data"] = json.dumps(data_to_dump, ensure_ascii=False)
        user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))

        # The PDF shares the Markdown file's base name.
        pdf_filename = f"{thread_id}.pdf"

        new_state["messages"] = add_messages(
            state["messages"],
            [AIMessage(content=f"{cont}报告已保存至: {report_filename}、{pdf_filename}")]
        )

        print(f"数据需求分析报告已生成完成，请查看: {report_filename}、{pdf_filename}")

    except Exception as e:
        print(f"生成报告时出现错误: {str(e)}")
        new_state["messages"] = add_messages(
            state["messages"],
            [AIMessage(content=f"报告生成失败: {str(e)}")]
        )

    # Always close out the session: route to END and log the terminal event.
    new_state["next"] = END
    message_dict["type"] = "结束"
    message_dict["message"]["desc"] = "结束"
    message_dict["message"]["data"] = "结束"
    user_logger.info(thread_id, json.dumps(message_dict, ensure_ascii=False))
    return new_state


def generate_data_requirement_report(state: SessionState, config: Optional[dict] = None) -> str:
    """Build the complete data-requirement analysis report from session state.

    Args:
        state: SessionState containing all analysis results.
        config: Optional runtime config; config["configurable"]["thread_id"]
            locates the user log for the thinking-chain section.

    Returns:
        str: The formatted Markdown report.
    """
    # Unpack the pieces of state the report sections need.
    require_json = _parse_json_safely(state.get("require_json"))
    field_availability = state.get("field_availability", {})
    user_require = state.get("user_require", [])
    process_solution = state.get("process_solution", "")
    collect_solution = state.get("collect_solution", "")

    # Extract thread_id defensively with explicit type checks instead of
    # a blanket try/except: config or its "configurable" entry may be
    # missing or of an unexpected type.
    thread_id = None
    if isinstance(config, dict):
        configurable = config.get("configurable")
        if isinstance(configurable, dict):
            thread_id = configurable.get("thread_id")

    # Generate each report section.
    overview_section = _generate_overview_section(require_json, field_availability, user_require)
    thinking_chain_section = _generate_thinking_chain_section(state, thread_id)
    requirement_list_section = _generate_requirement_list_section(require_json, user_require, field_availability)
    detailed_analysis_section = _generate_detailed_analysis_section(field_availability)
    next_steps_section = _generate_next_steps_section(field_availability, process_solution, collect_solution)
    appendix_section = _generate_appendix_section()

    # Assemble the full report.
    report = f"""
# 数据需求分析报告

## 一、概述
{overview_section}

## 二、思维链路
{thinking_chain_section}

## 三、数据需求清单
{requirement_list_section}

## 四、详细分析
{detailed_analysis_section}

## 五、下一步动作
{next_steps_section}

## 六、附录
{appendix_section}
    """.strip()

    return report


def _generate_overview_section(require_json: Dict, field_availability: Dict, user_require: List[str]) -> str:
    """Compose the report's overview section.

    Args:
        require_json: Parsed requirement JSON.
        field_availability: Field-availability analysis result.
        user_require: User requirement history (unused here, kept for
            interface stability).

    Returns:
        str: Overview text: requirement summary, field statistics and a
        development recommendation.
    """
    summary = _extract_requirement_content(require_json)
    stats = _analyze_field_statistics(field_availability)
    advice = _generate_recommendation(stats)

    return (
        f"本需求内容为：{summary}。\n\n"
        f"涉及字段共 {stats['total_fields']} 个，直接具备率 {stats['availability_rate']:.1f}% ，"
        f"需采集 {stats['missing_fields']} 个字段。\n\n"
        f"建议：{advice}。"
    )


def _generate_thinking_chain_section(state: SessionState, thread_id: Optional[str] = None) -> str:
    """
    生成思维链路部分

    Args:
        state: SessionState对象

    Returns:
        str: 思维链路部分内容
    """

    placeholder = (
        "[此部分内容将从系统日志中提取分析过程的思考摘要]\n"
        "[包含需求分类、字段匹配、方案生成等环节的要点]"
    )

    # 缺少 thread_id 时返回占位
    if not thread_id:
        return placeholder

    try:
        # 日志路径：app/logs/user/{thread_id}-{YYYYMMDD}.log
        base_dir = os.path.dirname(os.path.abspath(__file__))
        user_log_dir = os.path.normpath(os.path.join(base_dir, '..', 'logs', 'user'))
        today = datetime.datetime.now().strftime('%Y%m%d')
        target_path = os.path.join(user_log_dir, f"{thread_id}-{today}.log")

        if not os.path.exists(target_path):
            # 兜底：寻找 thread_id 对应的最新日志文件
            pattern = os.path.join(user_log_dir, f"{thread_id}-*.log")
            candidates = sorted(glob.glob(pattern))
            if candidates:
                target_path = candidates[-1]
            else:
                return placeholder

        entries: List[Dict[str, Any]] = []
        with open(target_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = (line or '').strip()
                if not line:
                    continue
                try:
                    obj = json.loads(line)
                    if isinstance(obj, dict):
                        entries.append(obj)
                except Exception:
                    continue

        if not entries:
            return placeholder

        # 仅提取 type == '规划' 的 data 内容
        plan_texts: List[str] = []
        for e in entries:
            if e.get('type') != '规划':
                continue
            msg = e.get('message')
            data_val = None
            if isinstance(msg, dict):
                data_val = msg.get('desc')
            if not data_val:
                continue
            # data 可能是 JSON 字符串或普通文本
            txt = None
            if isinstance(data_val, str):
                dv = data_val.strip()
                if (dv.startswith('{') and dv.endswith('}')) or (dv.startswith('[') and dv.endswith(']')):
                    try:
                        dv_obj = json.loads(dv)
                        if isinstance(dv_obj, dict):
                            # 优先 data_struct / summary / desc
                            txt = dv_obj.get('data_struct') or dv_obj.get('summary') or dv_obj.get('desc') or json.dumps(dv_obj, ensure_ascii=False)
                        elif isinstance(dv_obj, list):
                            txt = '；'.join([str(x) for x in dv_obj if x])
                    except Exception:
                        txt = dv
                else:
                    txt = dv
            elif isinstance(data_val, dict):
                txt = data_val.get('data_struct') or data_val.get('summary') or data_val.get('desc') or json.dumps(data_val, ensure_ascii=False)
            else:
                txt = str(data_val)

            if txt:
                # 清洗标记与换行
                clean = txt.replace('\n', ' ').replace('\r', ' ').replace('*', '').strip()
                if clean:
                    plan_texts.append(clean)

        if not plan_texts:
            return placeholder

        # 组合与润色为约100字的链路
        # 选取前若干条，强调思考步骤感
        core = '；'.join(plan_texts[:4])
        base = f"围绕本次需求，规划梳理为：{core}据此串联数据来源、字段匹配、加工与交付路径，形成可落地方案。"
        # if len(base) > 150:
        #     base = base[:150]
        #     if not base.endswith('。'):
        #         base += '…'
        return base
    except Exception:
        return placeholder


def _generate_requirement_list_section(require_json: Dict, user_require: List[str], field_availability: Optional[Dict] = None) -> str:
    """Build the data-requirement list section.

    Args:
        require_json: Parsed requirement JSON.
        user_require: User requirement interaction history.
        field_availability: Field-availability analysis result; primary
            source of field names (kept consistent with the detailed
            analysis table).

    Returns:
        str: Requirement-list section text.
    """
    # Number of communication rounds (at least 1).
    communication_rounds = len(user_require) if user_require else 1

    # Requirement summary.
    requirement_details = _extract_detailed_requirements(require_json)
    description = requirement_details.get('description', '具体需求内容待明确')

    def _to_bool(val) -> Optional[bool]:
        """Coerce bool-like values (bool or yes/no strings) to bool, else None."""
        if isinstance(val, bool):
            return val
        if isinstance(val, str):
            v = val.strip().lower()
            if v in {"true", "1", "yes", "y", "是"}:
                return True
            if v in {"false", "0", "no", "n", "否"}:
                return False
        return None

    # Determine whether the requirement touches the sensitive five-tuple.
    # BUGFIX: both lookups are now guarded — previously require_json.get()
    # and parsed.get() could raise AttributeError when require_json or its
    # 'parsed_require' entry was not a dict.
    if isinstance(require_json, dict):
        parsed = require_json.get('parsed_require', {})
        fallback_sensitive = require_json.get('sensitive_type')
    else:
        parsed = {}
        fallback_sensitive = None
    if not isinstance(parsed, dict):
        parsed = {}
    sensitive_type_val = parsed.get('sensitive_type', fallback_sensitive)
    is_sensitive = _to_bool(sensitive_type_val)
    if is_sensitive is True:
        sensitive_desc = "需求中涉及疑似敏感数据五元组字段。"
    elif is_sensitive is False:
        sensitive_desc = "需求中不涉及敏感五元组字段。"
    else:
        sensitive_desc = "敏感信息涉及情况未明确。"

    # Collect field names, matching the "字段名称" column of the detailed
    # analysis table. Primary source: field-availability analysis.
    field_names: List[str] = []
    if isinstance(field_availability, dict):
        for f in field_availability.get('fields_info', []) or []:
            name = f.get('original_field')
            if name:
                field_names.append(str(name))

    # Fallback: parsed_require.fields_info from require_json.
    if not field_names:
        for f in parsed.get('fields_info', []) or []:
            name = f.get('original_field')
            if name:
                field_names.append(str(name))

    # De-duplicate while preserving order.
    seen = set()
    dedup_fields = []
    for n in field_names:
        if n not in seen:
            dedup_fields.append(n)
            seen.add(n)

    fields_line = (
        f"涉及到的字段信息为：{('、'.join(dedup_fields)) if dedup_fields else '未明确'}"
    )

    return (
        f"经过 {communication_rounds} 轮沟通，最终明确具体需求为：{description}\n\n"
        f"{sensitive_desc}\n\n"
        f"{fields_line}"
    )


def _generate_detailed_analysis_section(field_availability: Dict) -> str:
    """Render the detailed field-analysis section.

    Args:
        field_availability: Field-availability analysis result.

    Returns:
        str: Per-category field summaries followed by the full analysis
        table, or a placeholder when no analysis data exists.
    """
    if not field_availability or 'fields_info' not in field_availability:
        return "[字段分析结果待完善]"

    fields_info = field_availability.get('fields_info', [])

    # Bucket each field by how it can be satisfied.
    buckets = {'direct': [], 'calc': [], 'process': [], 'collect': []}
    for info in fields_info:
        name = info.get('original_field', '')
        if info.get('status', '0') != '1':
            buckets['collect'].append(name)
        elif '直接使用' in info.get('match_status', ''):
            buckets['direct'].append(name)
        elif '加工' in info.get('match_status', ''):
            buckets['calc'].append(name)
        else:
            buckets['process'].append(name)

    def head3(names):
        # Up to three example names per category, or a "none" marker.
        return ', '.join(names[:3]) if names else '无'

    matched = len(buckets['direct']) + len(buckets['calc'])
    return (
        f"共涉及 {len(fields_info)} 个字段，其中直接在数据库中匹配出了 {matched} 个。\n\n"
        f"1. {head3(buckets['direct'])} 可直接从现有表中查询使用；\n"
        f"2. {head3(buckets['calc'])} 未直接找到匹配，但可通过现有表中简单计算使用；\n"
        f"3. {head3(buckets['process'])} 在库中找到了匹配，但因主键/账期/数据内容等不符合要求，需要加工后产出；\n"
        f"4. {head3(buckets['collect'])} 未找到合适匹配，需要新采集数据后方可使用。\n\n"
        f"{_generate_analysis_table(fields_info)}"
    )


def _generate_next_steps_section(field_availability: Dict, process_solution: str, collect_solution: str) -> str:
    """
    生成下一步动作部分

    Args:
        field_availability: 字段可用性分析结果
        process_solution: 加工方案
        collect_solution: 采集方案

    Returns:
        str: 下一步动作部分内容
    """

    # 获取需要采集和加工的字段
    collect_fields = []
    process_fields = []

    if field_availability and 'fields_info' in field_availability:
        for field in field_availability['fields_info']:
            status = field.get('status', '0')
            match_status = field.get('match_status', '')
            original_field = field.get('original_field', '')

            if status == '0':
                collect_fields.append(original_field)
            elif '加工' in match_status:
                process_fields.append(original_field)

    next_steps = (
        "- Step1 数据采集\n"
        f"  {'共需采集 ' + ', '.join(collect_fields[:2]) + ' 字段，' if collect_fields else '  无需额外数据采集。'}经源端数据调研后，在数据质量管理平台发起工单，由业务洞察负责采集入库，一般具备时间为 3-5 工作日（具体视排期确定）。\n\n"
        "- Step2 数据加工\n"
        f"  {'使用现有表中的字段进行加工处理，' if process_fields else '  无需数据加工。'}加工落地，经业务洞察审批上线后，可使用。字段标准化流程：先创建模型，审批上线后创建程序，程序上线后可能需追溯数据至指定日期。\n\n"
        "- Step3 数据交付\n"
        "  若交付方式为租户订购，待需求承接方加工完成后，需将加工后的表上架 UCX 并提供相关产品 ID 后，方可在梧桐数据开放平台上发起订购。"
    )

    return next_steps


def _generate_appendix_section() -> str:
    """
    生成附录部分

    Returns:
        str: 附录部分内容
    """

    appendix = (
        "1. 业务洞察人员分工表格\n"
        "2. 数据质量管理平台调研文本（如涉采集）"
    )

    return appendix


def _generate_requirement_table(requirement_details: Dict) -> str:
    """
    生成需求表格

    Args:
        requirement_details: 需求详细信息

    Returns:
        str: 格式化的需求表格
    """

    # 简化的表格格式
    table = """
字段名称 | 附加要求
---------|----------
[根据具体需求动态生成]"""

    return table


def _generate_analysis_table(fields_info: List[Dict]) -> str:
    """
    生成字段分析表格

    Args:
        fields_info: 字段信息列表

    Returns:
        str: 格式化的分析表格
    """

    if not fields_info:
        return "暂无字段分析数据"

    # 生成标准 Markdown 表格（5 列）
    # header = "字段名称 | 附加要求 | 推荐使用 | 使用方式 | Top3 匹配"
    header = "字段名称 | 推荐使用 | 使用方式 | Top3 匹配 | 附加要求"
    separator = "---|---|---|---|---"
    rows = [header, separator]

    for field in fields_info:  # 显示全部字段
        name = str(field.get('original_field', '') or '-')
        extra = str(field.get('extra_require', '') or '-')
        final_match = str(field.get('final_match', '无匹配') or '无匹配')
        match_status = str(field.get('match_status', '待分析') or '待分析')
        top3 = field.get('simple_match_top3', [])

        # 规范化 Top3 内容
        try:
            top3_str = '，'.join(map(str, (top3 or [])[:3])) if top3 else '无'
        except Exception:
            top3_str = '无'

        # 将换行替换为空格，避免破坏表格结构
        def _clean_cell(s: str) -> str:
            return s.replace('\n', ' ').replace('\r', ' ').strip()

        # row = f"{_clean_cell(name)} | {_clean_cell(extra)} | {_clean_cell(final_match)} | {_clean_cell(match_status)} | {_clean_cell(top3_str)}"
        row = f"{_clean_cell(name)} | {_clean_cell(final_match)} | {_clean_cell(match_status)} | {_clean_cell(top3_str)} | {_clean_cell(extra)} |"

        rows.append(row)

    return "\n".join(rows)


def _parse_json_safely(json_str: Optional[str]) -> Dict:
    """
    安全解析JSON字符串

    Args:
        json_str: JSON字符串

    Returns:
        Dict: 解析后的字典，解析失败返回空字典
    """
    if not json_str:
        return {}

    try:
        if isinstance(json_str, str):
            return json.loads(json_str)
        elif isinstance(json_str, dict):
            return json_str
    except (json.JSONDecodeError, TypeError):
        pass

    return {}


def _extract_requirement_content(require_json: Dict) -> str:
    """
    从require_json中提取需求内容描述

    Args:
        require_json: 需求JSON数据

    Returns:
        str: 需求内容描述
    """
    if not require_json:
        return "具体需求内容待明确"

    # 尝试从不同字段提取描述
    description = (require_json.get('description') or
                  require_json.get('requirement') or
                  require_json.get('content') or
                  "具体需求内容待明确")

    return str(description)


def _analyze_field_statistics(field_availability: Dict) -> Dict[str, Any]:
    """
    分析字段统计信息

    Args:
        field_availability: 字段可用性数据

    Returns:
        Dict: 包含字段统计的字典
    """
    if not field_availability or 'fields_info' not in field_availability:
        return {
            'total_fields': 0,
            'available_fields': 0,
            'missing_fields': 0,
            'availability_rate': 0.0
        }

    fields_info = field_availability.get('fields_info', [])
    total = len(fields_info)
    available = sum(1 for field in fields_info if field.get('status') == '1')
    missing = total - available
    rate = (available / total * 100) if total > 0 else 0

    return {
        'total_fields': total,
        'available_fields': available,
        'missing_fields': missing,
        'availability_rate': rate
    }


def _generate_recommendation(field_stats: Dict[str, Any]) -> str:
    """
    根据字段统计生成建议

    Args:
        field_stats: 字段统计信息

    Returns:
        str: 建议内容
    """
    rate = field_stats.get('availability_rate', 0)

    if rate >= 80:
        return "直接开发"
    elif rate >= 50:
        return "部分开发+启动采集"
    else:
        return "暂缓，优先解决数据采集"


def _extract_detailed_requirements(require_json: Dict) -> Dict[str, Any]:
    """Collect detailed requirement attributes into one dict.

    Args:
        require_json: Parsed requirement JSON.

    Returns:
        Dict: description (via _extract_requirement_content), fields
        (default []) and time_range / target_object / delivery_format /
        usage_purpose (each defaulting to a "to be clarified" marker).
    """
    details: Dict[str, Any] = {
        'description': _extract_requirement_content(require_json),
        'fields': require_json.get('fields', []),
    }
    for key in ('time_range', 'target_object', 'delivery_format', 'usage_purpose'):
        details[key] = require_json.get(key, '待明确')
    return details


def save_report_to_file(report_content: str, config: dict) -> str:
    """Persist the report as Markdown and (best-effort) a same-named PDF.

    Args:
        report_content: Report text (Markdown).
        config: Runtime config; config["configurable"]["thread_id"] becomes
            the base file name.

    Returns:
        str: The Markdown file name on success; on failure, a descriptive
        message containing the error (callers display it verbatim).
    """
    thread_id = config.get("configurable", {}).get("thread_id")
    filename = f"{thread_id}.md"

    try:
        # 1) Write the Markdown file.
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(report_content)

        # 2) Emit a same-named PDF (best effort; silently skipped when the
        #    optional reportlab dependency is unavailable).
        _save_pdf_best_effort(report_content, f"{thread_id}.pdf")

        return filename
    except Exception as e:
        print(f"保存报告文件失败: {str(e)}")
        return f"报告生成完成（保存失败: {str(e)}）"


def _save_pdf_best_effort(report_content: str, pdf_filename: str) -> None:
    """
    Best-effort conversion of the Markdown report text to a PDF at pdf_filename.

    Notes:
    - Uses reportlab.platypus to render paragraphs, headings, bullets and
      Markdown tables; table cells are Paragraphs so long text wraps and the
      5-column layout stays readable.
    - If reportlab is not installed, no exception is raised — PDF generation
      is simply skipped so the main flow is unaffected.
    """
    try:
        # Lazy imports so a missing optional dependency never breaks the main flow.
        from reportlab.lib.pagesizes import A4
        from reportlab.lib import colors
        from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
        from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
        from reportlab.pdfbase import pdfmetrics
        from reportlab.pdfbase.cidfonts import UnicodeCIDFont
        from xml.sax.saxutils import escape as xml_escape

        # Register reportlab's built-in CJK font so Chinese text is not garbled.
        try:
            pdfmetrics.registerFont(UnicodeCIDFont('STSong-Light'))
            font_name = 'STSong-Light'
        except Exception:
            # Fall back to the default font (Chinese rendering not guaranteed).
            font_name = 'Helvetica'

        # Document template and paragraph styles.
        doc = SimpleDocTemplate(
            pdf_filename,
            pagesize=A4,
            leftMargin=40,
            rightMargin=40,
            topMargin=40,
            bottomMargin=40,
        )
        styles = getSampleStyleSheet()
        base = ParagraphStyle('Base', parent=styles['Normal'], fontName=font_name, fontSize=12, leading=16)
        h1 = ParagraphStyle('H1', parent=base, fontSize=18, leading=22, spaceAfter=6)
        h2 = ParagraphStyle('H2', parent=base, fontSize=16, leading=20, spaceAfter=6)
        h3 = ParagraphStyle('H3', parent=base, fontSize=14, leading=18, spaceAfter=6)
        bullet = ParagraphStyle('Bullet', parent=base, leftIndent=16)
        table_text = ParagraphStyle('TableText', parent=base, fontSize=10, leading=14)

        story = []

        lines = report_content.splitlines()

        import re

        def parse_table_block(start_index: int):
            # Parse a Markdown table starting at start_index; returns
            # (rows, next_index), or (None, start_index) when no table starts here.
            header_line = lines[start_index].strip()
            if '|' not in header_line:
                return None, start_index
            # Split a pipe-delimited row into trimmed cells.
            def split_row(s: str):
                parts = [p.strip() for p in s.strip().strip('|').split('|')]
                return parts
            header_cells = split_row(header_line)
            if len(header_cells) < 2:
                return None, start_index
            if start_index + 1 >= len(lines):
                return None, start_index
            separator_line = lines[start_index + 1].strip()
            sep_parts = [p.strip() for p in separator_line.strip().strip('|').split('|')]
            if len(sep_parts) != len(header_cells):
                return None, start_index
            # Loosely validate the separator line shape (--- or :---:).
            if not all(re.match(r'^:?-{3,}:?$', p or '-') for p in sep_parts):
                return None, start_index

            # Collect data rows until the first non-table line.
            data = [header_cells]
            i = start_index + 2
            while i < len(lines):
                row_line = lines[i]
                if '|' not in row_line:
                    break
                row_cells = split_row(row_line)
                # Pad or trim each row to the header's column count.
                if len(row_cells) < len(header_cells):
                    row_cells += [''] * (len(header_cells) - len(row_cells))
                elif len(row_cells) > len(header_cells):
                    row_cells = row_cells[:len(header_cells)]
                data.append(row_cells)
                i += 1
            return data, i

        i = 0
        while i < len(lines):
            raw = lines[i]
            line = raw.rstrip()
            if not line.strip():
                story.append(Spacer(1, 8))
                i += 1
                continue

            # Try to parse a table block first.
            table_block, next_i = parse_table_block(i)
            if table_block is not None:
                # Wrap every cell in a Paragraph so long text can wrap.
                table_data = []
                for r, row in enumerate(table_block):
                    if r == 0:
                        # Header row (currently styled the same as body rows).
                        table_data.append([Paragraph(xml_escape(cell), table_text) for cell in row])
                    else:
                        table_data.append([Paragraph(xml_escape(cell), table_text) for cell in row])

                # Column widths: proportional split of the usable A4 width (5 columns).
                total_width = doc.width
                col_widths = [0.18, 0.18, 0.20, 0.20, 0.24]
                col_widths = [w * total_width for w in col_widths[:len(table_block[0])]]

                tbl = Table(table_data, colWidths=col_widths, hAlign='LEFT', repeatRows=1)
                tbl.setStyle(TableStyle([
                    ('FONTNAME', (0, 0), (-1, -1), font_name),
                    ('FONTSIZE', (0, 0), (-1, -1), 10),
                    ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#F5F5F5')),
                    ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
                    ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                    ('GRID', (0, 0), (-1, -1), 0.5, colors.HexColor('#BBBBBB')),
                    ('LEFTPADDING', (0, 0), (-1, -1), 4),
                    ('RIGHTPADDING', (0, 0), (-1, -1), 4),
                    ('TOPPADDING', (0, 0), (-1, -1), 4),
                    ('BOTTOMPADDING', (0, 0), (-1, -1), 4),
                ]))
                story.append(tbl)
                story.append(Spacer(1, 10))
                i = next_i
                continue

            # Headings, bullets and plain paragraphs.
            if line.startswith('### '):
                story.append(Paragraph(xml_escape(line[4:]), h3))
            elif line.startswith('## '):
                story.append(Paragraph(xml_escape(line[3:]), h2))
            elif line.startswith('# '):
                story.append(Paragraph(xml_escape(line[2:]), h1))
            elif line.startswith('- '):
                story.append(Paragraph(xml_escape('• ' + line[2:]), bullet))
            else:
                story.append(Paragraph(xml_escape(line), base))
            i += 1

        doc.build(story)

    except ImportError:
        # reportlab not installed: skip PDF generation entirely.
        pass
    except Exception as e:
        # Any other failure must not break the main flow; log and continue.
        print(f"PDF 生成失败（已忽略）: {e}")