# Concurrent file-chunk processing in Python (Flask SSE streaming interface)
import json
import logging
import re
import threading
import time
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import List, Dict, Any, Tuple, Optional

from flask import Response
from langchain_text_splitters import RecursiveCharacterTextSplitter

from llm import LLM
from prompt import *
from utils import file_chunker, batch_chunks, TokenCounter, match_claim_chunk

# When True, "/think" is appended to prompts to force the model into
# reasoning ("think") mode — see imitative_write_stream.
REFORCE_THINK = True

llm = LLM("llm_model")
config = llm.get_config()

MODEL = config['modelname']
MODEL_CONTEXT_WINDOW = config['context_window']
# Reserve half of the context window for input; the rest is left for generation.
MODEL_INPUT_WINDOW = MODEL_CONTEXT_WINDOW // 2

# Initialize service components.
tokencounter = TokenCounter(config['modelpath'])
MAX_WORKERS = config['max_workers']

# Configure logging and warnings.
warnings.filterwarnings("ignore", category=UserWarning)
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
logger = logging.getLogger(__name__)

# Coarse splitter: chunks sized to the model input window, no overlap.
r_splitter = RecursiveCharacterTextSplitter(
    chunk_size=MODEL_INPUT_WINDOW,
    chunk_overlap=0,
    separators=["\n\n", "\n", "。", " ", ""]
)

# Fine-grained splitter used to pinpoint the cited passage inside a chunk.
fine_grained_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=100,
    separators=["\n\n", "\n", "。", " ", ""]
)


def files_token_stat(session: Dict[str, Any]) -> tuple[int, int]:
    """Count tokens for every file attached to the session.

    Side effect: writes a ``token_num`` field onto each file dict
    (0 when the file has no ``content``).

    Args:
        session: request session; files live under ``session["data"]``.

    Returns:
        (template_tokens, total_tokens)
    """
    t0 = time.time()
    data = session.get("data", {})
    template_tokens = 0
    total_tokens = 0

    # Template files: count the content only.
    if "templateFiles" in data:
        for f in data["templateFiles"]:
            if "content" not in f:
                f["token_num"] = 0
                continue
            f["token_num"] = tokencounter.token_count(f["content"])
            template_tokens += f["token_num"]
            total_tokens += f["token_num"]

    # Reference (material) files: content plus title tokens.
    if "materialFiles" in data:
        for f in data["materialFiles"]:
            if "content" not in f:
                f["token_num"] = 0
                continue
            f["token_num"] = (tokencounter.token_count(f["content"])
                              + tokencounter.token_count(f.get("title", "")))
            total_tokens += f["token_num"]

    logger.info(
        f"文件Token计算完毕，耗时 {time.time() - t0:.2f}s；"
        f"模板Token数：{template_tokens}，总Token数：{total_tokens}"
    )
    return template_tokens, total_tokens


def chunk_token_stat(chunks: List[Dict[str, Any]], key: str) -> int:
    """Sum the token counts of ``chunk[key]`` (plus title) across all chunks.

    Side effect: stores the per-chunk count in ``chunk["claim_token_num"]``
    (0 when ``key`` is absent from the chunk).
    """
    t0 = time.time()
    total = 0
    for ck in chunks:
        if key not in ck:
            ck["claim_token_num"] = 0
            continue
        ck["claim_token_num"] = (tokencounter.token_count(ck[key])
                                 + tokencounter.token_count(ck.get("title", "")))
        total += ck["claim_token_num"]

    logger.info(f"分块Token计算完毕，耗时 {time.time() - t0:.2f}s")
    return total


def parse_think(string: str) -> str:
    """Strip a leading ``<think>...</think>`` reasoning section from LLM output.

    Returns the text after the first ``</think>`` tag; if that remainder is
    empty, falls back to the think body itself (with the ``<think>`` tag
    removed). Input lacking either tag is returned unchanged.
    """
    res = string
    if "<think>" in string and "</think>" in string:
        # partition() splits on the FIRST occurrence only; the original
        # two-target unpacking of split() raised ValueError whenever
        # "</think>" appeared more than once in the output.
        think, _, res = string.partition("</think>")
        if len(res) == 0:
            res = think.replace("<think>", "")
    logger.info(f"think转化完毕")
    return res



def extract_json(res: str) -> Dict[str, Any] | None:
    """Parse JSON out of raw LLM output.

    Tries a direct ``json.loads`` first; on failure, extracts the body of the
    first fenced ```json code block and parses that.

    Returns:
        The parsed object, or None when no valid JSON can be recovered.
    """
    start_time = time.time()
    try:
        json_data = json.loads(res)
        logger.info(f"直接解析JSON成功，耗时 {time.time() - start_time:.2f}s")
        return json_data
    except json.JSONDecodeError as e:
        logger.error(f"直接解析JSON失败：{str(e)}")

    # Fallback: collect the body of the first ```json fenced block.
    json_string = ""
    in_json_block = False
    for line in res.split("\n"):
        if "```json" in line:
            in_json_block = True
            continue
        if "```" in line and in_json_block:
            # Stop at the closing fence. The original kept scanning, which
            # concatenated any later ```json blocks into one unparseable
            # string and made the parse below fail.
            break
        if in_json_block:
            json_string += line

    json_string = json_string.strip()
    if not json_string:
        logger.error("未提取到JSON代码块")
        return None

    try:
        json_data = json.loads(json_string)
        logger.info(f"手动提取JSON成功，耗时 {time.time() - start_time:.2f}s")
        return json_data
    except json.JSONDecodeError as e:
        logger.error(f"手动解析JSON失败：{str(e)}\n原始内容：{res}")
        return None


def extract_write_res(res: str) -> tuple[str, str]:
    """Split an LLM imitation-writing answer into (preamble, paper body).

    Every line after a heading containing both "###" and "仿写结果" belongs
    to the paper body; lines before it form the preamble. Heading lines
    themselves are dropped.

    Returns:
        (not_paper_string, paper_string), both stripped.
    """
    start_time = time.time()
    before_lines: List[str] = []
    paper_lines: List[str] = []
    in_paper = False
    for line in res.split("\n"):
        if "###" in line and "仿写结果" in line:
            in_paper = True
            continue
        (paper_lines if in_paper else before_lines).append(line)

    paper_string = "\n".join(paper_lines).strip()
    not_paper_string = "\n".join(before_lines).strip()
    logger.info(f"仿写结果提取完毕，耗时 {time.time() - start_time:.2f}s")
    return not_paper_string, paper_string


def extract_claim(
    template: str,
    fileid: str,
    chunkid: int,
    content: str,
    prev_result: str | None,
    max_retry_times: int = 3
) -> tuple[str, Dict[str, Any] | None]:
    """Ask the LLM to extract key points ("claims") for one reference chunk.

    Retries up to ``max_retry_times`` times until the model output contains
    parseable JSON.

    Returns:
        (raw_llm_output, parsed_json) on success, ("", None) after all
        retries fail.
    """
    start_time = time.time()
    logger.info(f"开始提取要点，文献id: {fileid}, 块id: {chunkid}")

    # Fill the prompt template with the document, the chunk, and the
    # (possibly empty) tail of the previous chunk's result.
    llm_input = (
        extract_prompt_v1
        .replace("<!-模板文档-!>", template)
        .replace("<!-参考文献-!>", content)
        .replace("<!-参考文献前置内容-!>", prev_result if prev_result else "")
    )

    for attempt in range(max_retry_times):
        llm_output = llm.generate_response(llm_input)
        logger.info(f"LLM输出要点内容: \n{llm_output}")

        parsed = extract_json(llm_output)
        if parsed is not None:
            logger.info(f"要点解析成功, 耗时：{time.time() - start_time:.2f}s")
            return llm_output, parsed

        logger.info(f"第{attempt+1}次重试提取要点")

    logger.error(f"超过最大重试次数，提取要点失败")
    return "", None


def judge_score(
    template: str,
    references_text: str,
    max_retry_times: int = 3
) -> tuple[str, Dict[str, Any] | None, str | None]:
    """Score reference material against the template via the LLM judge.

    Retries until the model returns parseable JSON. The "修改后的模板文档"
    key is popped out of the parsed JSON and returned separately.

    Returns:
        (raw_output, scores_json, modified_template); ("", None, None) after
        all retries fail.
    """
    llm_input = judge_score_prompt_v2.replace("<!-模板文档-!>", template) \
        .replace("<!-参考文献-!>", references_text)
    logger.info(f"开始评分，模板: \n{llm_input}")

    for attempt in range(max_retry_times):
        raw_output = llm.generate_response(llm_input)
        logger.info(f"评分结果: \n{raw_output}")

        parsed = extract_json(raw_output)
        if parsed is not None:
            revised_template = parsed.pop("修改后的模板文档", None)
            return raw_output, parsed, revised_template

        logger.info(f"第{attempt+1}次重试评分")

    logger.error(f"超过最大重试次数，评分失败")
    return "", None, None


def build_judge_reference_content(
    references: List[Dict[str, Any]], 
    key: str | None = None, 
    start: int = 1
) -> str:
    """构建评分用的参考内容格式"""
    prefix_references = []
    if key is None:
        prefix_references = [
            f"##### 参考文章序号{i}：\n{ref}" 
            for i, ref in enumerate(references, start=start)
        ]
    else:
        prefix_references = [
            f"##### 参考文章序号{i}：{ref.get('title', '无标题')}\n{ref.get(key, '')}" 
            for i, ref in enumerate(references, start=start)
        ]

    return "\n".join(prefix_references)


def build_reference_content(
    references: List[Dict[str, Any]], 
    key: str | None = None, 
    start: int = 1
) -> str:
    """Build structured reference content, grouping chunks by file id.

    Each file becomes one "##### 参考文章序号N" section, where N is the
    position (counting from ``start``) of that file's first chunk in
    ``references``. ``key`` selects what is rendered per group:
    "claim" (numbered key points), "content" (original text), anything
    else (raw chunk dicts).
    """
    start_time = time.time()
    logger.info(f"开始构建参考内容，分块数量：{len(references)}，目标字段：{key}")

    # Map each file id to the index of its first chunk (= its article number).
    file_id_to_index = {}
    for idx, ref in enumerate(references, start=start):
        file_id = ref.get('fileid') or ref.get('fileId')
        if file_id and file_id not in file_id_to_index:
            file_id_to_index[file_id] = idx

    # Group chunks by file id, remembering the first seen title per file.
    file_groups = {}
    for ref in references:
        file_id = ref.get('fileid') or ref.get('fileId')
        if not file_id:
            continue
        if file_id not in file_groups:
            file_groups[file_id] = {
                'refs': [],
                'title': ref.get('title', '无标题')
            }
        file_groups[file_id]['refs'].append(ref)

    # Render one section per file.
    prefix_references = []
    for file_id, group in file_groups.items():
        article_idx = file_id_to_index.get(file_id, "未知")
        if key == "claim":
            all_claims = []
            for ref in group['refs']:
                claim_text = ref.get('claim', '')
                claim_parts = [p.strip() for p in claim_text.split('\n') if p.strip()]
                all_claims.extend([f"- {part}" for part in claim_parts])
            # Number the points from 1 so the model's "文献序号:基本点序号"
            # citations line up with find_claim_and_chunk's 1-based point
            # lookup. The original numbered from 0, shifting every citation
            # by one and making point "0" citable.
            numbered_claims = [f"{n}.{claim}" for n, claim in enumerate(all_claims, start=1)]
            prefix_references.append(
                f"##### 参考文章序号{article_idx}：{group['title']}\n"
                f"  基本点内容：\n" + "\n".join(numbered_claims)
            )
        elif key == "content":
            all_contents = [ref.get('content', '') for ref in group['refs']]
            prefix_references.append(
                f"##### 参考文章序号{article_idx}：{group['title']}\n"
                f"  原文内容：\n" + "\n\n".join(all_contents)
            )
        else:
            all_refs = [str(ref) for ref in group['refs']]
            prefix_references.append(
                f"##### 参考文章序号{article_idx}：{group['title']}\n"
                f"  原始分块信息：\n" + "\n\n".join(all_refs)
            )

    # NOTE: the original logged every intermediate structure (full chunk data)
    # on each iteration; those dumps were removed to keep logs usable.
    result = "\n".join(prefix_references)
    logger.info(f"build_reference_content 执行完毕，耗时{time.time() - start_time:.2f}s")
    return result

def find_claim_and_chunk(
    references: List[Dict[str, Any]],  # original chunk dicts
    target_article_idx: int,          # cited "参考文章序号" value
    target_point_idx: int,            # cited 1-based point number in that article
    start_article_idx: int = 1        # must match build_reference_content's start
) -> Optional[Dict[str, Any]]:
    """Resolve a citation (article number, point number) back to its chunk.

    Rebuilds the same "fileid -> article number" mapping used by
    build_reference_content, then locates the ``target_point_idx``-th
    (1-based) key point among that file's chunks.

    Args:
        references: chunk dicts, each with fileid/fileId and claim fields.
        target_article_idx: the cited article number.
        target_point_idx: the cited 1-based point number inside the article.
        start_article_idx: article-number origin; keep in sync with
            build_reference_content's ``start`` parameter.

    Returns:
        The chunk dict containing the cited point, or None when the article
        or point number cannot be resolved. (The original docstring and
        annotation promised a (claim, chunk) tuple, but a single value was
        always returned and callers rely on that.)
    """
    # Step 1: rebuild the fileid -> article-number mapping (first occurrence
    # wins), mirroring build_reference_content exactly.
    fileid_to_article_idx: Dict[str, int] = {}
    for idx, chunk in enumerate(references, start=start_article_idx):
        # Prefer fileid, fall back to fileId (naming-convention tolerance).
        fileid = chunk.get("fileid") or chunk.get("fileId")
        if fileid and fileid not in fileid_to_article_idx:
            fileid_to_article_idx[fileid] = idx

    # Step 2: invert the mapping for the requested article number.
    target_fileid: Optional[str] = None
    for fileid, article_idx in fileid_to_article_idx.items():
        if article_idx == target_article_idx:
            target_fileid = fileid
            break
    if not target_fileid:
        print(f"未找到参考文章序号{target_article_idx}对应的文件（fileid）")
        return None

    # Step 3: collect every key point of that file, keeping each point
    # index-aligned with the chunk it came from.
    all_claims: List[str] = []
    claim_to_chunk: List[Dict[str, Any]] = []
    for chunk in references:
        current_fileid = chunk.get("fileid") or chunk.get("fileId")
        if current_fileid != target_fileid:
            continue

        claim_str = chunk.get("claim", "").strip()
        if not claim_str:
            continue  # skip chunks without claims

        single_claims = [p.strip() for p in claim_str.split("\n") if p.strip()]
        all_claims.extend(single_claims)
        claim_to_chunk.extend([chunk] * len(single_claims))

    # Step 4: validate the 1-based point number. The original accepted
    # target_point_idx == 0, which indexed all_claims[-1] (the LAST point)
    # — or raised IndexError when the list was empty.
    if 1 <= target_point_idx <= len(all_claims):
        return claim_to_chunk[target_point_idx - 1]

    print(f"参考文章序号{target_article_idx}中，不存在基本点序号{target_point_idx}（共{len(all_claims)}个基本点）")
    return None

def process_chunks_concurrently(
    template: str, 
    chunks: List[Dict[str, Any]], 
    max_workers: int = 5
) -> None:
    """Concurrently extract key points for all chunks, mutating each chunk.

    Chunks belonging to the same file are forced into chunkid order: the
    worker for chunk N blocks on a per-file Condition until chunk N-1's
    result exists, then passes the first 200 chars of that result as prior
    context. Each chunk gains 'claim' and 'claim_json' fields in place.
    """
    file_data = {}  # per-fileid state: {'results': {chunkid: claim}, 'condition': Condition}
    file_data_lock = threading.Lock()
    max_workers = min(max_workers, MAX_WORKERS)  # cap worker count with the global limit

    def process_chunk(chunk: Dict[str, Any]) -> None:
        # Worker: extract the claim for one chunk, honoring same-file ordering.
        nonlocal file_data
        fileid = chunk['fileid']
        chunkid = chunk['chunkid']
        logger.info(f"开始处理分块: 文件id:{fileid}, 块id:{chunkid}")

        # Build the content to process (prefix the title when present).
        prefix = f"参考文献:{chunk['title']}\n" if chunk.get('title') else ""
        content = prefix + chunk['content']

        # Lazily initialize this file's shared state under the lock.
        with file_data_lock:
            if fileid not in file_data:
                file_data[fileid] = {
                    'results': {},
                    'condition': threading.Condition()
                }
            file_info = file_data[fileid]

        # Wait until the preceding chunk of the same file has a result.
        prev_result = None
        if chunkid > 0:
            with file_info['condition']:
                while (chunkid - 1) not in file_info['results']:
                    file_info['condition'].wait()  # block until notified
                prev_result = file_info['results'][chunkid - 1][:200]  # truncated prior result

        try:
            # Extract this chunk's key points via the LLM.
            claim, claim_json = extract_claim(template, fileid, chunkid, content, prev_result)

            if claim_json is not None:
                chunk['claim'] = "\n".join(list(claim_json.values()))
                chunk["claim_json"] = json.dumps(claim_json, ensure_ascii=False)
            else:
                # JSON parsing failed: fall back to the raw (de-think-ed) text.
                chunk['claim'] = parse_think(claim)
                chunk["claim_json"] = None
        finally:
            # Always record a result and wake waiters — even on failure —
            # so successor chunks of the same file are never blocked forever.
            with file_info['condition']:
                file_info['results'][chunkid] = chunk.get('claim', f"处理失败: {fileid} 分块 {chunkid}")
                file_info['condition'].notify_all()

        logger.info(f"完成处理: 文件id:{fileid}, 块id:{chunkid}")

    # Sort chunks (fileid, then chunkid) and submit them to the pool; FIFO
    # submission order means a chunk's predecessor task is dequeued first.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        logger.info(f"开启{max_workers}个线程并行处理")
        chunks_sorted = sorted(chunks, key=lambda x: (x['fileid'], x['chunkid']))
        futures = [executor.submit(process_chunk, chunk) for chunk in chunks_sorted]
        for future in futures:
            future.result()  # wait for completion and propagate exceptions


def process_batch(
    batch: List[Dict[str, Any]], 
    template_file_content: str
) -> tuple[str | None, List[Dict[str, Any]]]:
    """Score one batch of chunks against the template via the LLM judge.

    Returns:
        (modified_template_or_None, scored) where ``scored`` is a list of
        {'chunk': <chunk dict>, 'score': '<digits>'} entries for every chunk
        the judge assigned an "N分" score to.
    """
    batch_start = time.time()
    claim_text = build_judge_reference_content(batch, 'claim')
    logger.info(f"开始处理批次，分块数量：{len(batch)}")

    # Ask the judge for per-reference scores and an optional revised template.
    _, chunk_scores, modified_template = judge_score(template_file_content, claim_text)

    scored: List[Dict[str, Any]] = []
    if chunk_scores:
        score_pattern = r'(\d+)分'
        for pos, chunk in enumerate(batch, start=1):
            score_key = f"参考文献{pos}"
            if score_key not in chunk_scores:
                continue
            # Pull the numeric score (e.g. "7分") out of the judge's text.
            found = re.findall(score_pattern, chunk_scores[score_key])
            if found:
                scored.append({'chunk': chunk, 'score': found[0]})

    logger.info(f"批次处理完毕，耗时{time.time() - batch_start:.2f}s")
    return modified_template, scored


def find_cite_numbers(text: str) -> List[Tuple[int, int]]:
    """Extract (article_no, point_no) citation pairs from <cite_symbol> tags.

    Only numeric "N:M" pairs are matched. The original unit pattern admitted
    letters ([a-zA-Z0-9]+) and then crashed with ValueError in int() on any
    alphabetic match; restricting the pattern to digits removes that crash
    (non-numeric pairs are now ignored).
    """
    result: List[Tuple[int, int]] = []
    # Scan every <cite_symbol>...</cite_symbol> body, including multi-line ones.
    for body in re.findall(r'<cite_symbol>(.*?)</cite_symbol>', text, re.DOTALL):
        for article_no, point_no in re.findall(r'(\d+):(\d+)', body):
            result.append((int(article_no), int(point_no)))
    return result


def get_stream_headers():
    """Return the standard Server-Sent Events response headers."""
    headers = {}
    headers["Content-Type"] = "text/event-stream; charset=utf-8"
    headers["Cache-Control"] = "no-cache"
    headers["Connection"] = "keep-alive"
    headers["X-Accel-Buffering"] = "no"  # disable proxy (nginx) buffering
    return headers

def imitative_write_stream(
    ref_type: str,
    template: str,
    content: str,
    chunks: Optional[List[Dict[str, Any]]] = None,
    follow_content: str = ""):
    """Generate a document via the LLM as a Server-Sent Events stream.

    Yields "new_text" events while the model streams, then a single
    "cite_extracted" event carrying {'report': ..., 'source': [...]}, and a
    final end/[DONE] event.

    Args:
        ref_type: "content" or "claim"; selects the prompt and whether
            citation back-matching runs afterwards.
        template: template document text.
        content: formatted reference content.
        chunks: original reference chunks, needed for citation matching
            when ref_type == "claim". Default changed from a mutable ``[]``
            (Python anti-pattern) to None; the only use is a truthiness
            check, so behavior is unchanged.
        follow_content: optional caller-supplied prompt-template override.

    Returns:
        A generator producing SSE-formatted strings.
    """
    # NOTE: the original declared `global llm, MODEL, REFORCE_THINK`, but all
    # three are only read, so the statement was a no-op and is removed.
    start_time = time.time()
    logger.info(f"开始生成文档，参考类型: {ref_type}")
    logger.info(f"imitative_write_stream: =========================>follow_content 对应数据为: \n{follow_content}")

    # A usable prompt override must contain placeholder markers.
    if follow_content and ("<" not in follow_content or ">" not in follow_content):
        logger.warning("follow_content 可能不是有效的提示词模板，将使用默认模板")
        follow_content = ""

    # Pick the prompt template; "claim" and any unknown ref_type share one.
    if follow_content:
        prompt = follow_content
    elif ref_type == "content":
        prompt = template_write_prompt_content_v4
    else:
        prompt = template_write_prompt_claim_v4

    logger.info(f"imitative_write_stream: =========================>prompt 对应数据为：\n{prompt}")
    
    # Build the final LLM input by filling in the placeholders.
    llm_input = prompt.replace("<!-模板文档-!>", template) \
        .replace("<!-参考文献-!>", content)
    logger.info(f"imitative_write_stream: =========================>llm_input 对应数据为：\n{llm_input}")
    
    if REFORCE_THINK:
        llm_input += "/think"

    def stream_generator():
        accumulated_content = ""
        logger.info(f"imitative_write_stream--stream_generator: =========================> 调用开始，llm_input 对应数据为：\n{llm_input}")
        # Stream tokens from the model.
        response = llm.generate_response_stream(llm_input)
        for chunk in response:
            if chunk is None:
                yield f"data: [ERROR] 模型调用失败"
                break
            accumulated_content += chunk
            # Escape CR/LF so each SSE data field stays on a single line.
            tmp_res = chunk.replace(chr(10), '\\n').replace(chr(13), '\\r')
            yield f"data: {tmp_res}\nevent: new_text\n\n"
            time.sleep(0.05)  # throttle output rate

        # Drop the <think>...</think> section if present.
        parsed_content = parse_think(accumulated_content)
        
        # Final payload: full report plus matched source passages.
        res = {
            'report': parsed_content,
            'source': []
        }
        
        # Map citation markers back to the original reference text.
        if ref_type == "claim" and chunks:
            cite_results = find_cite_numbers(parsed_content)
            
            for cite in cite_results:
                try:
                    file_id, point_id = cite
                    # Resolve the cited (article, point) pair to its chunk.
                    claim_chunk = find_claim_and_chunk(chunks, file_id, point_id)
                    if claim_chunk:
                        # Re-split the source chunk finely to pinpoint the passage.
                        fine_grained_chunks = fine_grained_splitter.split_text(claim_chunk["content"])
                        internal_chunks = [{
                            "fileid": claim_chunk["fileid"],
                            "title": claim_chunk["title"],
                            "out_chunkid": claim_chunk["chunkid"],
                            "chunkid": idx,
                            "content": fine_grained_chunk,
                            "claim": claim_chunk["claim"]
                        } for idx, fine_grained_chunk in enumerate(fine_grained_chunks)]

                        # Pick the fine-grained piece most relevant to the claim.
                        fine_grained_content = match_claim_chunk(claim_chunk["claim"], internal_chunks)
                        if fine_grained_content:
                            source_entry = {
                                f"{file_id}:{point_id}": {
                                    'title': fine_grained_content[0]['title'],
                                    'content': fine_grained_content[0]['content']
                                }
                            }
                            res['source'].append(source_entry)
                except Exception as e:
                    print("match_claim_chunk Exception",e)
                        
        # Emit the structured result, then the end-of-stream marker.
        cite_json = json.dumps(res, ensure_ascii=False)
        yield f"data: {cite_json}\nevent: cite_extracted\n\n"

        yield f"event: end\ndata: [DONE]\n\n"
        logger.info(f"文档生成完毕，耗时{time.time() - start_time:.2f}s，\naccumulated_content内容为:\n{accumulated_content}")

    return stream_generator()
    

def select_median_string(modified_templates: List[str]) -> tuple[str, str, str]:
    """Pick the (longest, median, shortest) templates by whitespace-split word count.

    Returns three empty strings when the input list is empty.
    """
    t0 = time.time()
    if not modified_templates:
        logger.warning("无修改后的模板，返回空字符串")
        return "", "", ""

    # Stable sort by word count, ascending.
    ordered = sorted(modified_templates, key=lambda s: len(s.split()))
    shortest = ordered[0]
    median = ordered[len(ordered) // 2]
    longest = ordered[-1]

    logger.info(
        f"模板选择完毕，耗时 {time.time() - t0:.2f}s，"
        f"最长模板字数：{len(longest.split())}，最短：{len(shortest.split())}"
    )
    return longest, median, shortest


def reporter_cw_cite(session: dict) -> dict | List[str]:
    """Main controller for the document-generation flow.

    Token-budget strategy:
      1. Everything fits the model input window -> stream directly from raw
         reference content.
      2. Too large -> chunk the references and extract claims concurrently;
         if the claims fit, stream from claims.
      3. Claims still too large -> batch-score chunks via the LLM judge,
         optionally adopt an LLM-revised template, and keep the
         highest-scoring chunks within the token budget.

    Returns a Flask SSE Response on the success paths, or an error dict.
    (The annotation predates the Response return paths — left unchanged for
    callers, but Response is a possible return value.)
    """
    data = session.get("data", {})
    template_files = data.get("templateFiles", [])
    material_files = data.get("materialFiles", [])
    follow_content = data.get("followContent", "")

    # Input validation.
    if not template_files or not material_files:
        return {"error": "缺少模板文件或参考文献"}

    # Ensure every file carries a title field.
    for file in template_files + material_files:
        file["title"] = file.get("title", "")

    template_file = template_files[0]
    template_content = template_file["content"]
    template_tokens, total_tokens = files_token_stat(session)

    # Up-front token-limit check.
    def check_token_limit(template_tokens_param, model_input_window):
        """Return an SSE error Response when the template alone exceeds the input window, else None."""
        # NOTE(review): the error message interpolates the enclosing scope's
        # template_tokens / MODEL_INPUT_WINDOW rather than the parameters —
        # identical values at this call site, but confirm before reusing.
        if template_tokens_param > model_input_window:
            error_msg = f"模板输入过长（预估 {template_tokens} tokens，超过模型输入窗口限制 {MODEL_INPUT_WINDOW} tokens）,请选择合适大小的模板模板进行写作"
            logger.error(error_msg)

            def generate_error():
                yield f"event: error\ndata: [ERROR] {error_msg}\n\n"
                yield "event: end\ndata: [DONE]\n\n"

            return Response(generate_error(), mimetype="text/event-stream")
        return None

    error_response = check_token_limit(template_tokens, MODEL_INPUT_WINDOW)

    if error_response:
        return error_response

    # Case 1: everything fits -> generate directly from the raw content.
    if total_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(material_files, "content")
        # FIX: follow_content must be passed by keyword. The original passed
        # it as the 4th positional argument, which is `chunks`, so the
        # caller's prompt override was silently dropped on this path.
        return Response(imitative_write_stream("content", template_content, reference_content, follow_content=follow_content), headers=get_stream_headers())

    # Case 2: total tokens exceed the input window -> chunk and extract claims.
    _, reference_chunks = file_chunker(r_splitter, session)
    process_chunks_concurrently(template_content, reference_chunks)

    # Total token count of the extracted claims.
    total_claim_tokens = chunk_token_stat(reference_chunks, "claim")

    # Case 2.1: claims (plus template) fit -> generate from claims.
    if total_claim_tokens + template_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(reference_chunks, "claim")
        return Response(imitative_write_stream("claim", template_content, reference_content, reference_chunks, follow_content),headers=get_stream_headers())


    # Case 3: claims still exceed the limit -> batch, score, and select.
    sorted_chunks = sorted(reference_chunks, key=lambda x: (x['fileid'], x['chunkid']))
    logger.info(f"sorted_chunks =======================================================> 排序后块结果为:\n{sorted_chunks}")
    chunk_batchs = batch_chunks(sorted_chunks, MODEL_INPUT_WINDOW)
    modified_templates = []
    all_chunk_scores = []
    # Score the batches concurrently.
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(process_batch, batch, template_content) for batch in chunk_batchs]
        for future in futures:
            modified_template, batch_results = future.result()
            if modified_template:
                modified_templates.append(modified_template)
            all_chunk_scores.extend(batch_results)

    # Adopt the judge's revised template when available (longest by words).
    if modified_templates:
        template_content, _, _ = select_median_string(modified_templates)
    else:
        logger.warning("未获取到修改后的模板，使用原始模板")

    # Greedily keep the best-scoring chunks within the token budget.
    sorted_scored_chunks = sorted(all_chunk_scores, key=lambda x: int(x['score']), reverse=True)
    selected_chunks = []
    current_token = tokencounter.token_count(template_content)

    for item in sorted_scored_chunks:
        chunk = item['chunk']
        chunk_token = tokencounter.token_count(chunk['claim']) + tokencounter.token_count(chunk['title'])
        if current_token + chunk_token <= MODEL_INPUT_WINDOW:
            selected_chunks.append(chunk)
            current_token += chunk_token

    logger.info(f"reporter_write: ========================================> selected_chunks, 对应数据为: \n{selected_chunks}")
    # Generate the final document from the selected chunks.
    if selected_chunks:
        final_reference = build_reference_content(selected_chunks, "claim")
        logger.info(
            f"reporter_write: ========================================> \n template_content, 对应数据为: \n{template_content}"
            f"\n final_reference, 对应的数据为: \n{final_reference}\n"
            f"\n follow_content, 对应的数据为: \n{follow_content}\n"
        )
        return Response(imitative_write_stream("claim", template_content, final_reference, selected_chunks, follow_content),headers=get_stream_headers())

    return {"error": "无法完成写作：没有足够相关内容"}


if __name__ == '__main__':
    # NOTE(review): the Flask endpoint below is kept commented out for
    # reference. It calls reporter_write(), while the function defined above
    # is reporter_cw_cite() — confirm the intended entry point before
    # re-enabling.
    # app = Flask(__name__)
    # @app.route('/level-write-stream', methods=['POST'])
    # def level_write_stream_endpoint():
    #     data = request.get_json()
    #     if not data:
    #         return jsonify({"error": "Empty request body"}), 400
    #
    #     # Keep the payload shape consistent with the original structure.
    #     session = data.get('data') if isinstance(data, dict) else data
    #     session = {'data': session }
    #     # Ensure session is a dict.
    #     if not isinstance(session, dict):
    #         return jsonify({"error": "Invalid data format"}), 400
    #     return reporter_write(session)
    # app.run(host='0.0.0.0', port=8000, debug=True)
    print("")