# Concurrent file-chunk processing for LLM report generation (Flask streaming endpoints, not FastAPI)
import json
import logging
import re
import threading
import time
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Any, List

from flask import jsonify, Response
from langchain_text_splitters import RecursiveCharacterTextSplitter

from llm import LLM
from prompt import *
from utils import file_chunker, batch_chunks, TokenCounter

# Force the LLM into "think" mode by appending "/think" to prompts (see imitative_write_stream).
REFORCE_THINK = True

llm = LLM("llm_model")
config = llm.get_config()

MODEL = config['modelname']
MODEL_CONTEXT_WINDOW = config['context_window']
# Reserve half of the context window for input; the rest is left for generation.
MODEL_INPUT_WINDOW = MODEL_CONTEXT_WINDOW // 2

# Service components: tokenizer-backed token counter and worker-pool size.
tokencounter = TokenCounter(config['modelpath'])
MAX_WORKERS = config['max_workers']

# Logging / warning configuration.
warnings.filterwarnings("ignore", category=UserWarning)
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
logger = logging.getLogger(__name__)

# Recursive splitter used to cut oversized reference files into chunks that fit
# the model input window; consecutive chunks do not overlap.
r_splitter = RecursiveCharacterTextSplitter(
    chunk_size=MODEL_INPUT_WINDOW,
    chunk_overlap=0,
    separators=["\n\n", "\n", "。", " ", ""]
)


def files_token_stat(session):
    """Compute and cache per-file token counts for the session's files.

    Mutates every file dict in ``data["templateFiles"]`` and
    ``data["materialFiles"]`` by setting ``file["token_num"]``.

    Returns:
        tuple[int, int]: ``(template_tokens, total_tokens)`` — token count of
        the template files, and the grand total over templates plus material
        files (material counts include the title).
    """
    start_time = time.time()
    total_tokens = 0
    template_tokens = 0
    data = session.get("data", {})

    # Template files: content only.
    for file in data.get("templateFiles", []):
        if "content" in file:
            file["token_num"] = tokencounter.token_count(file["content"])
            # BUG FIX: accumulate instead of overwrite, so that with several
            # template files all of them are counted (the original kept only
            # the last file's count).
            template_tokens += file["token_num"]
            total_tokens += file["token_num"]
        else:
            file["token_num"] = 0

    # Material files: the title is sent to the LLM too, so count it as well.
    for file in data.get("materialFiles", []):
        if "content" in file:
            # .get avoids a KeyError for files that carry no title.
            file["token_num"] = tokencounter.token_count(file["content"]) + tokencounter.token_count(file.get("title", ""))
            total_tokens += file["token_num"]
        else:
            file["token_num"] = 0
    logger.info(
        f"文件token数计算完毕，耗时 {time.time() - start_time:.2f}s； 模板token数为： {template_tokens}，总token数为:{total_tokens}。")
    return template_tokens, total_tokens


# 计算分块要点token数
# 计算分块要点token数
def chunk_token_stat(chunks, key):
    """Compute token counts for ``chunk[key]`` of each chunk (title included).

    Mutates each chunk by setting ``chunk["claim_token_num"]``; chunks missing
    *key* get 0. Returns the summed token count over all chunks.
    """
    total_tokens = 0
    for chunk in chunks:
        if key in chunk:
            # The title is sent alongside the claim, so its tokens count too.
            # .get avoids a KeyError for chunks without a title.
            chunk["claim_token_num"] = tokencounter.token_count(chunk[key]) + tokencounter.token_count(chunk.get("title", ""))
            total_tokens += chunk["claim_token_num"]
        else:
            chunk["claim_token_num"] = 0
    return total_tokens


def parse_think(string):
    """Strip a model's ``<think>...</think>`` section from *string*.

    Returns the text after ``</think>``; if that text is empty (the model
    produced only a think block), falls back to the think text itself with the
    ``<think>`` tag removed. Strings without ``</think>`` pass through unchanged.
    """
    res = string
    if "</think>" in string:
        # maxsplit=1 tolerates a stray extra "</think>" later in the text,
        # which would otherwise make tuple unpacking raise ValueError.
        think, res = string.split("</think>", 1)
        if len(res) == 0:
            # BUG FIX: str.replace requires a replacement argument; the
            # original think.replace("<think>") raised TypeError.
            res = think.replace("<think>", "")
    logger.info(f"think转化完毕")
    return res


def extract_json(res):
    """Parse JSON out of an LLM response.

    First attempts to parse *res* directly; on failure, gathers the lines
    inside a ```json fenced block and parses those instead. Returns the parsed
    object, or None when both attempts fail.
    """
    start_time = time.time()
    try:
        parsed = json.loads(res)
    except json.JSONDecodeError as e:
        logger.error(f"尝试直接解析```json出错，错误信息为: \n{str(e)}")
    else:
        logger.info(f"json抽取，耗时 {time.time() - start_time:.2f}s")
        return parsed

    # Collect the body of the ```json fenced block, fence lines excluded.
    collected = []
    inside = False
    for line in res.split("\n"):
        if "```json" in line:
            inside = True
            continue
        if "```" in line and inside:
            inside = False
        if inside:
            collected.append(line)

    candidate = "".join(collected).strip()

    try:
        parsed = json.loads(candidate)
    except json.JSONDecodeError as e:
        logger.error(f"手动JSON解析失败: {str(e)}\n 原始内容: {res}")
        return None
    logger.info(f"手动json抽取解析，耗时 {time.time() - start_time:.2f}s")
    return parsed


def extract_write_res(res):
    """Split an LLM response into (other text, imitation-writing result).

    Everything after a heading line containing both "###" and "仿写结果" is the
    generated paper; everything before it is returned as the first element.
    """
    start_time = time.time()
    paper_lines = []
    other_lines = []
    in_paper_block = False
    for line in res.split("\n"):
        if "###" in line and "仿写结果" in line:
            in_paper_block = True
            continue
        if in_paper_block:
            paper_lines.append(line)
        else:
            other_lines.append(line)

    # BUG FIX: the original concatenated lines without any separator, which
    # destroyed the document's line structure; join with "\n" to preserve it.
    paper_string = "\n".join(paper_lines).strip()
    not_paper_string = "\n".join(other_lines)
    logger.info(f"json抽取，耗时 {time.time() - start_time:.2f}s")
    return not_paper_string, paper_string


"""
# 内容提取器
三级处理策略：
    1. 首次尝试：直接调用LLM生成
    2. 失败重试：最多3次尝试
    3. 最终回退：解析think标签内容
输入组装：
    - 模板结构 + 当前分块 + 前块摘要
输出处理：
    - 成功：返回JSON格式要点
    - 失败：返回原始文本
"""
def extract_claim(template, fileid, chunkid, content, prev_result, max_retry_times=3):
    """Extract key claims from one reference chunk via the LLM.

    Builds the prompt from the template, the chunk content and (optionally)
    the previous chunk's result, then retries up to *max_retry_times* until
    the LLM output parses as JSON.

    Returns:
        tuple[str, dict | None]: (raw LLM output, parsed JSON) on success,
        ("", None) after all retries fail.
    """
    start_time = time.time()
    logger.info(f"extract_claim=================================> 内容提取器开始处理，参考文献id: {fileid}, 块id: {chunkid}")
    llm_input = extract_prompt_v1.replace("<!-模板文档-!>", template) \
        .replace("<!-参考文献-!>", content) \
        .replace("<!-参考文献前置内容-!>", prev_result if prev_result else "")
    i = 0
    while max_retry_times:
        llm_output = llm.generate_response(llm_input)
        logger.info(f"参考文献id: {fileid}, 块id: {chunkid} 的要点内容为: \n{llm_output}")
        llm_output_json = extract_json(llm_output)
        # BUG FIX: log the parsed JSON rather than the raw output a second time.
        logger.info(f"参考文献id: {fileid}, 块id: {chunkid} 的要点内容json格式为: \n{llm_output_json}")
        if llm_output_json is not None:
            # BUG FIX: the original format spec ":.2f }" carried a trailing
            # space inside the braces, raising ValueError on the success path.
            logger.info(f"extract_claim====================> 参考文献id: {fileid}, 块id: {chunkid} 的要点内容解析成功, 耗时：\n {time.time() - start_time:.2f}s")
            return llm_output, llm_output_json
        max_retry_times -= 1
        i += 1
        logger.info(f"======第{i}次重试提取要点======")
    return "", None


"""
请根据以下模板评估参考文献质量：
    === 模板要求 ===
    {template}

    === 参考文献 ===
    {references_text}

    输出要求：
    1. 按"参考文献序号X"格式评分（0-10分）
    2. 给出具体评分理由
    3. 如需模板修改，在"修改后的模板文档"字段说明

    输入： 
    template = "行业报告模板\n## 技术趋势\n## 市场分析"
    references_text = 
    ##### 参考文章序号1：AI发展
      - 神经网络突破...
    ##### 参考文章序号2：经济数据
      - GDP增长率...

    LLM输出:
    {
        "参考文献1": "9分：详细描述技术突破，完全匹配'技术趋势'章节",
        "参考文献2": "6分：仅提供基础经济数据，需补充行业关联分析",
        "修改后的模板文档": "建议在'市场分析'章节增加'AI经济影响'子节"
    }
"""
def judge_score(template, references_text, max_retry_times=3):
    """Ask the LLM to score each reference against the template.

    *references_text* is the numbered "##### 参考文章序号..." listing produced
    by build_judge_reference_content / build_reference_content.

    Returns:
        tuple[str, dict | None, str | None]: (raw output, scores JSON with the
        "修改后的模板文档" entry popped out, that modified template). Returns
        ("", None, None) when every retry fails to produce valid JSON.
    """
    llm_input = judge_score_prompt_v2.replace("<!-模板文档-!>", template) \
        .replace("<!-参考文献-!>", references_text)
    logger.info(f"judge_score======================> 内容评分开始处理，模板为：\n{llm_input}")
    i = 0
    while max_retry_times:
        llm_output = llm.generate_response(llm_input)
        logger.info(f"judge_score内容评分结果为：\n{llm_output}")
        llm_output_json = extract_json(llm_output)
        # BUG FIX: log the parsed JSON instead of repeating the raw output.
        logger.info(f"judge_score内容评分结果对应的json为：\n{llm_output_json}")
        if llm_output_json is not None:
            # Separate the modified-template suggestion from the per-reference scores.
            modified_template = llm_output_json.pop("修改后的模板文档", None)
            return llm_output, llm_output_json, modified_template
        max_retry_times -= 1
        i += 1
        logger.info(f"第===================== {i} ===============================次重试评分")
    return "", None, None

"""
构建带序号的参考文章内容格式：
遍历references列表，为每个参考文章添加序号前缀
如果key参数为None，直接使用参考文章内容；否则提取参考文章中指定key的值
使用enumerate生成序号（从start参数开始）
将所有格式化后的参考文章用换行符连接成一个字符串返回

##### 参考文章序号{序号}：{标题}
  基本点内容：
  - {要点1}
  - {要点2}
  - {要点3}
##### 参考文章序号{序号}：{标题}
  基本点内容：
  - {要点1}
  - {要点2}

"""
def build_judge_reference_content(references, key=None, start=1):
    """Render *references* as a numbered "##### 参考文章序号N" listing.

    When *key* is None each reference is rendered as-is; otherwise
    ``ref[key]`` is rendered. Numbering starts at *start*. Returns one
    newline-joined string.
    """
    def body_of(ref):
        return ref if key is None else ref[key]

    sections = []
    for number, ref in enumerate(references, start=start):
        sections.append(f"##### 参考文章序号{number}：\n{body_of(ref)}")
    return "\n".join(sections)



"""
构建标准化的参考文献内容字符串
核心功能：
    1. 按文件ID分组处理参考文献
    2. 支持提取指定字段（如claim/content）或原始内容
    3. 生成带结构化标记的文本（含标题和序号）
特殊处理：
    - 相同文件的不同分块自动合并
    - 支持自定义起始序号
references = [
    {
        "fileid": "A",
        "title": "深度学习",
        "content": "神经网络基础...",
        "claim": "CNN用于图像处理\nRNN适合序列数据"
    },
    {
        "fileid": "A",
        "chunkid": 1,
        "content": "Transformer结构...",
        "claim": "Attention机制是核心"
    },
    {
        "fileid": "B",
        "title": "机器学习",
        "content": "监督学习算法...",
        "claim": "SVM适合小样本"
    }
]

输出示例（key="claim"）
##### 参考文章序号1：深度学习
  基本点内容：
  - CNN用于图像处理
  - RNN适合序列数据
  - Attention机制是核心
##### 参考文章序号2：机器学习
  基本点内容：
  - SVM适合小样本
"""
def build_reference_content(references, key=None, start=1):
    """Build a numbered, per-file reference listing for prompt assembly.

    Chunks sharing a fileid are merged into one "##### 参考文章序号N" section
    (the title is taken from the first chunk of that file). ``key="claim"``
    renders merged bullet points, ``key="content"`` concatenates the raw
    contents, any other key renders the stringified chunk dicts.
    """
    logger.info(f"build_reference_content========================================> 开始,对应references数据为：\n{references},\nkey为:{key}")
    start_time = time.time()
    # Map each unique fileId to a sequential section number.
    # BUG FIX: the original numbered by chunk position in the list, so a file
    # following a multi-chunk file got a gapped number (1, 3, ...) instead of
    # the consecutive numbering (1, 2, ...) shown in the documented example.
    file_id_to_index = {}
    next_index = start
    for ref in references:
        file_id = ref.get('fileid') or ref.get('fileId')  # tolerate both casings
        if file_id and file_id not in file_id_to_index:
            file_id_to_index[file_id] = next_index
            next_index += 1

    logger.info(f"file_id_to_index========================================> 对象,对应数据为：\n{file_id_to_index}")
    # Group chunks by file id, remembering the first-seen title per file.
    file_groups = {}
    for ref in references:
        file_id = ref.get('fileid') or ref.get('fileId')
        if file_id:
            if file_id not in file_groups:
                file_groups[file_id] = {'refs': [], 'title': ref.get('title', '')}
            file_groups[file_id]['refs'].append(ref)
    logger.info(f"file_groups========================================> 按文件ID将chunk分组,对应数据为：\n{file_groups}")
    prefix_references = []

    # Render one section per file. (Per-iteration log spam removed; the final
    # result is still logged below.)
    for file_id, group in file_groups.items():
        idx = file_id_to_index.get(file_id, "未知")

        if key == "claim":
            # Merge every chunk's claim lines into one bullet list.
            all_claims = []
            for ref in group['refs']:
                claim_text = ref.get('claim', '')
                claim_points = [f"- {point}" for point in claim_text.split('\n') if point.strip()]
                all_claims.extend(claim_points)
            prefix_references.append(
                f"##### 参考文章序号{idx}：{group['title']}\n"
                f"  基本点内容：\n" + "\n".join(all_claims)
            )
        elif key == "content":
            # Merge raw chunk contents.
            all_contents = [ref.get('content', '') for ref in group['refs']]
            prefix_references.append(
                f"##### 参考文章序号{idx}：{group['title']}\n" +
                "\n\n".join(all_contents)
            )
        else:
            # No recognized key: stringify the raw chunk dicts.
            all_refs = [str(ref) for ref in group['refs']]
            prefix_references.append(
                f"##### 参考文章序号{idx}：\n" +
                "\n\n".join(all_refs)
            )
    logger.info(f"build_reference_content 执行完毕，耗时{time.time() - start_time:.2f}s")
    res = "\n".join(prefix_references)
    logger.info(f"build_reference_content 执行完毕，返回结果:\n{res}")
    return res


"""
并行处理文档分块的核心函数
关键机制:
    1. 按fileid分组处理
    2. 使用Condition变量确保同一文件的分块按顺序处理
    3. 线程间传递前一个分块的处理结果保持连贯性

file_data = {
    "file1": {
        "results": {0: "前块内容", 1: "当前内容"},  # 分块结果存储
        "condition": threading.Condition()  # 同步原语
    }
}
"""
def process_chunks_concurrently(template, chunks: List[Dict[str, Any]], max_workers: int = 5):
    """Extract claims for all chunks in parallel while keeping per-file order.

    Chunks sharing a fileid are processed in chunkid order: the worker for
    chunk N blocks on a per-file Condition until chunk N-1's result exists,
    then forwards (up to 200 chars of) that result to extract_claim for
    continuity. Mutates each chunk in place with 'claim' and 'claim_json'.
    """
    file_data = {}  # shared per-file state: fileid -> {'results', 'condition'}
    file_data_lock = threading.Lock()  # guards creation of file_data entries

    def process_chunk(chunk: Dict[str, Any]):
        # Worker body: wait for the previous chunk of this file, then extract claims.
        nonlocal file_data
        fileid = chunk['fileid']
        chunkid = chunk['chunkid']

        logger.info(f"当前处理分块: =========================>文件id:{fileid},第 {chunkid}块")
        prefix = ""
        if chunk['title']:
            prefix = "参考文献:" + chunk['title'] + "\n"
        content = prefix + chunk['content']
        title = chunk.get('title', '')  # kept for the completion log below

        # Create this file's state on first sight (creation is serialized by the lock).
        with file_data_lock:
            if fileid not in file_data:
                file_data[fileid] = {
                    'results': {},  # chunkid -> claim text of finished chunks
                    'condition': threading.Condition()  # coordinates chunk ordering
                }
            logger.info(f"当前字典状态: =========================>\n{file_data}")
            file_info = file_data[fileid]
            logger.info(f"文件id为：{fileid}的文件加入字典中，当前字典内容: =========================>\n{file_data}")
            # Wait for the preceding chunk of the same file to finish.
        prev_result = None
        if chunkid > 0:
            with file_info['condition']:
                while (chunkid - 1) not in file_info['results']:
                    file_info['condition'].wait()  # blocks until the previous chunk completes
                # Forward at most the first 200 chars of the previous result.
                prev_result = file_info['results'][chunkid - 1]
                prev_result = prev_result[:200] if len(prev_result) > 200 else prev_result

        # Extract this chunk's claim points via the LLM.
        claim, claim_json = extract_claim(template, fileid, chunkid, content, prev_result)

        if claim_json is not None:
            chunk['claim'] = "\n".join(list(claim_json.values()))
            chunk["claim_json"] = json.dumps(claim_json, ensure_ascii=False)
        else:
            # Fall back to the raw (think-stripped) text when JSON parsing failed.
            chunk['claim'] = parse_think(claim)
            chunk["claim_json"] = None

        # Publish the result and wake any worker waiting on this file.
        with file_info['condition']:
            file_info['results'][chunkid] = chunk['claim']
            file_info['condition'].notify_all()

        logger.info(f"Completed processing {fileid} chunk {chunkid} (标题: {title})")

    # Sort by (fileid, chunkid) so same-file chunks are submitted in order.
    # NOTE(review): a worker that raises never publishes its result, leaving
    # waiters on later chunks of the same file blocked — confirm upstream error handling.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        logger.info(f"开启了{max_workers}个线程并行处理")
        chunks_sorted = sorted(chunks, key=lambda x: (x['fileid'], x['chunkid']))
        logger.info(f"排序后分块情况: {chunks_sorted}")
        futures = [executor.submit(process_chunk, chunk) for chunk in chunks_sorted]
        for future in futures:
            future.result()

"""
评分流程：
    1. 构建批处理内容：combine_claims(batch)
    2. 调用LLM进行多维度评分：
       - 内容相关性(0-10分)
       - 模板符合度(是/否)
    3. 返回修改建议
输出示例：
    {
        "参考文献1": "8分：包含关键技术指标",
        "修改后的模板": "新增AI伦理章节"
    }
"""
def process_batch(batch, template_file_content):
    """Score one batch of chunks against the template via the LLM.

    Builds the numbered claim listing for *batch*, asks judge_score for
    per-reference marks, and pairs each scored chunk with its parsed score.

    Returns:
        tuple: (modified_template or None, list of {'chunk', 'score'} dicts);
        chunks whose score string could not be parsed are dropped.
    """
    start_time = time.time()
    chunk_text = build_judge_reference_content(batch, 'claim')
    logger.info(f"处理单个批次并返回结果: =========================>chunk_text 对应数据为：\n{chunk_text}")
    _, chunk_scores, modified_template = judge_score(template_file_content, chunk_text)
    batch_results = []

    if chunk_scores:
        # judge_score keys its results "参考文献1", "参考文献2", ... in batch order.
        for idx, chunk in enumerate(batch):
            chunk_name = f"参考文献{idx + 1}"
            if chunk_name in chunk_scores:
                # Pull the leading number out of strings like "8分：...".
                matches = re.findall(r'(\d+)分', chunk_scores[chunk_name])
                if matches:
                    batch_results.append({
                        'chunk': chunk,
                        'score': matches[0]
                    })
    # BUG FIX: ".2f" sat outside the braces, so the duration printed unformatted.
    logger.info(f"process_batch 执行完毕,耗时{time.time() - start_time:.2f}秒,\n 内容为:\n{batch_results}")
    return modified_template, batch_results


"""
文档生成流式接口
实现特点:
    1. 支持自定义引导提示(follow_content)
    2. 流式输出控制(0.05秒间隔)
    3. 错误处理机制
"""
def imitative_write_stream(template, content, follow_content=""):
    """Stream an LLM-generated document as Server-Sent Events.

    The prompt is *follow_content* when it looks like a valid template
    (contains "<" and ">"), otherwise template_write_prompt_v3; the
    "<!-模板文档-!>" and "<!-参考文献-!>" placeholders are filled with
    *template* and *content*. Returns a generator yielding SSE "data:" frames
    at ~0.05s intervals, terminated by an "end"/[DONE] event.
    """
    global llm, MODEL, REFORCE_THINK
    start_time = time.time()
    logger.info(f"imitative_write_stream: =========================>follow_content 对应数据为: \n{follow_content}")

    # Validate follow_content: the placeholders use <!-...-!>, so both "<" and ">" must appear.
    if follow_content and ("<" not in follow_content or ">" not in follow_content):
        logger.warning("follow_content 可能不是有效的提示词模板，将使用默认模板")
        follow_content = ""

    prompt = follow_content if follow_content else template_write_prompt_v3
    logger.info(f"imitative_write_stream: =========================>prompt 对应数据为：\n{prompt}")
    llm_input = prompt.replace("<!-模板文档-!>", template) \
        .replace("<!-参考文献-!>", content)
    logger.info(f"imitative_write_stream: =========================>llm_input 对应数据为：\n{llm_input}")
    if REFORCE_THINK:
        llm_input += "/think"  # request "think" mode from the model
    def stream_generator():
        # Inner generator so the SSE framing runs lazily inside the Response.
        accumulated_content = ""
        logger.info(f"imitative_write_stream--stream_generator: =========================> 调用开始，llm_input 对应数据为：\n{llm_input}")
        response = llm.generate_response_stream(llm_input)
        for chunk in response:
            if chunk is None:
                yield f"data: [ERROR] 模型调用失败"
                break
            else:
                accumulated_content += chunk
                # Escape raw newlines/carriage returns so each SSE frame stays single-line.
                yield f"data: {chunk.replace(chr(10), '\\n').replace(chr(13), '\\r')}\nevent: new_text\n\n"
                time.sleep(0.05)  # throttle the streaming output rate
        yield f"event: end\ndata: [DONE]\n\n"

        logger.info(f"imitative_write_stream 执行完毕,耗时{time.time() - start_time}.2f秒,\n accumulated_content内容为:\n{accumulated_content}")
    return stream_generator()


def select_median_string(modified_templates):
    """Return (longest, median, shortest) template by whitespace word count.

    Raises:
        IndexError: when *modified_templates* is empty.
    """
    start_time = time.time()
    # Sort ascending by word count; sorting the strings directly with a key
    # replaces the original's intermediate (string, count) pair list.
    by_word_count = sorted(modified_templates, key=lambda s: len(s.split()))
    median_index = len(by_word_count) // 2
    # BUG FIX: ".2f" sat outside the braces, printing an unformatted float.
    logger.info(f"select_median_string 执行完毕，{time.time() - start_time:.2f} 秒")
    return by_word_count[-1], by_word_count[median_index], by_word_count[0]


def get_stream_headers():
    """Standard response headers for Server-Sent Events streaming."""
    headers = {}
    headers["Content-Type"] = "text/event-stream"
    headers["Cache-Control"] = "no-cache"
    headers["Connection"] = "keep-alive"
    headers["X-Accel-Buffering"] = "no"  # disable nginx proxy buffering
    return headers


"""
文档生成主流程控制器
处理逻辑:
    1. 输入验证 → 2. Token检查 → 3. 分块处理 → 4. 内容评分 → 5. 最终生成
"""
def reporter_write(session: dict):
    """Main document-generation controller.

    Pipeline: validate input -> token check -> (if oversized) chunking plus
    claim extraction -> (if still oversized) batched scoring and greedy chunk
    selection -> stream the imitation-writing result.

    Returns a streaming flask Response, a (json, 400) tuple on invalid input,
    or an error dict when no usable content survives selection.
    """
    data = session.get("data", {})
    template_files = data.get("templateFiles", [])  # report template(s)
    material_files = data.get("materialFiles", [])  # reference material
    follow_content = data.get("followContent", "")  # optional custom guiding prompt

    if not template_files or not material_files:
        return jsonify({"error": "缺少模板文件或参考文献"}), 400

    # Make sure every file carries a title key.
    for file in template_files + material_files:
        file["title"] = file.get("title", "")

    template_file = template_files[0]
    template_content = template_file["content"]
    template_tokens, total_tokens = files_token_stat(session)

    # Up-front token check: the template alone must fit the input window.
    def check_token_limit(template_tokens_param, model_input_window):
        """Return an SSE error Response when the template exceeds the window, else None."""
        if template_tokens_param > model_input_window:
            error_msg = f"模板输入过长（预估 {template_tokens} tokens，超过模型输入窗口限制 {MODEL_INPUT_WINDOW} tokens）,请选择合适大小的模板模板进行写作"
            logger.error(error_msg)

            def generate_error():
                yield f"event: error\ndata: [ERROR] {error_msg}\n\n"
                yield "event: end\ndata: [DONE]\n\n"

            return Response(generate_error(), mimetype="text/event-stream")
        return None

    error_response = check_token_limit(template_tokens, MODEL_INPUT_WINDOW)
    if error_response:
        return error_response

    # Fast path: everything fits -> write directly from the raw contents.
    if total_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(material_files, "content")
        return Response(imitative_write_stream(template_content, reference_content, follow_content),
                        headers=get_stream_headers())

    # Too big: split files into chunks and extract per-chunk claims.
    _, reference_chunks = file_chunker(r_splitter, session)
    process_chunks_concurrently(template_content, reference_chunks)

    # If the condensed claims fit alongside the template, write from them.
    total_claim_tokens = chunk_token_stat(reference_chunks, "claim")
    if total_claim_tokens + template_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(reference_chunks, "claim")
        return Response(imitative_write_stream(template_content, reference_content, follow_content),
                        headers=get_stream_headers())

    # Still too big: batch the chunks and score each batch in parallel.
    sorted_chunks = sorted(reference_chunks, key=lambda x: (x['fileid'], x['chunkid']))
    logger.info(f"sorted_chunks =======================================================> 排序后块结果为:\n{sorted_chunks}")
    chunk_batchs = batch_chunks(sorted_chunks, MODEL_INPUT_WINDOW)
    # BUG FIX: this log printed sorted_chunks instead of the batches.
    logger.info(
        f"chunk_batchs =======================================================> 分块后的块结果为:\n{chunk_batchs}")
    modified_templates = []
    all_chunk_scores = []
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(process_batch, batch, template_content) for batch in chunk_batchs]
        for future in futures:
            modified_template, batch_results = future.result()
            if modified_template:
                modified_templates.append(modified_template)
            all_chunk_scores.extend(batch_results)

    # Pick a replacement template from the LLM's suggestions.
    # BUG FIX: the original called select_median_string and then immediately
    # overwrote the result with modified_templates[-1] (dead code), and both
    # lines crashed with IndexError when no modified templates were produced.
    # Keep select_median_string's choice and fall back to the original
    # template when the list is empty.
    if modified_templates:
        template_content, _, _ = select_median_string(modified_templates)

    # Greedily take the highest-scored chunks that still fit the window.
    # (The original computed this same sort twice; once is enough.)
    sorted_scored_chunks = sorted(all_chunk_scores, key=lambda x: int(x['score']), reverse=True)
    selected_chunks = []
    current_token = tokencounter.token_count(template_content)
    for item in sorted_scored_chunks:
        chunk = item['chunk']
        chunk_token = tokencounter.token_count(chunk['claim']) + tokencounter.token_count(chunk['title'])
        if current_token + chunk_token <= MODEL_INPUT_WINDOW:
            selected_chunks.append(chunk)
            current_token += chunk_token

    logger.info(f"reporter_write: ========================================> selected_chunks, 对应数据为: \n{selected_chunks}")
    if selected_chunks:
        final_reference = build_reference_content(selected_chunks, "claim")
        logger.info(
            f"reporter_write: ========================================> \n template_content, 对应数据为: \n{template_content}"
            f"\n final_reference, 对应的数据为: \n{final_reference}\n"
            f"\n follow_content, 对应的数据为: \n{follow_content}\n"
        )
        return Response(imitative_write_stream(template_content, final_reference, follow_content),
                        headers=get_stream_headers())

    return {"error": "无法完成写作：没有足够相关内容"}


if __name__ == '__main__':
    # Smoke check when run directly; the real entry points are the functions above.
    print(123)
