import time
import sys
import os
import glob
from core.logger import get_backend_logger
from core.summary_file_utils import create_directories, check_existing_files, save_chapter_files, save_draft_file, save_plot_file, save_chapters_file, save_sole_draft_file
from core.parser_utils import parse_chapters
from core.summary_novel import summary_draft, summary_plot, summary_chapters, new_summary_draft
from config import MAX_NOVEL_SUMMARY_LENGTH, MAX_THREAD_NUM, ENABLE_ONLINE_DEMO
# Module-level logger shared by the functions below.
logger = get_backend_logger()
# NOTE(review): this appends the project root to sys.path AFTER the `core.*`
# imports above have already run, so it cannot affect them — presumably it is
# for modules imported later elsewhere; confirm it is still needed.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

def batch_yield(generators, ret=None, max_co_num=5):
    """Drive several generators forward in round-robin batches.

    Each round advances up to ``max_co_num`` unfinished generators by one
    step, then yields the list of their latest yielded values so the caller
    can report progress.  A generator that raises is marked finished (its
    slot stays ``None``) so one failure does not abort the whole batch.

    Args:
        generators: list of generator objects to drive.
        ret: optional list that receives the final results in place
            (cleared, then extended).  Callers that consume this function
            with a ``for`` loop read the results back through this list,
            since the ``return`` value is only visible via StopIteration.
        max_co_num: maximum number of generators advanced per round.

    Returns:
        List of each generator's ``return`` value, in input order
        (``None`` for generators that raised); also delivered via ``ret``.
    """
    # A fresh list per call: the previous `ret=[]` default shared ONE list
    # across every call that omitted `ret`, so a later call's clear/extend
    # clobbered (and aliased) an earlier caller's results.
    if ret is None:
        ret = []
    results = [None] * len(generators)
    yields = [None] * len(generators)
    finished = [False] * len(generators)
    errors = [None] * len(generators)  # per-generator error message, if any

    while True:
        co_num = 0
        for i, gen in enumerate(generators):
            if finished[i]:
                continue
            try:
                co_num += 1
                yields[i] = next(gen)
            except StopIteration as e:
                # Normal completion: the generator's return value rides on
                # StopIteration.value.
                results[i] = e.value
                finished[i] = True
            except Exception as e:
                # Record the error but keep driving the other generators.
                errors[i] = str(e)
                finished[i] = True
                yields[i] = None  # mark this slot as failed
                logger.warning(f"生成器 {i} 发生异常: {e}")
            if co_num >= max_co_num:
                break

        if all(finished):
            break

        yield yields

    # Errors are logged, never raised: partial results are still useful.
    if any(error is not None for error in errors):
        error_summary = [f"生成器{i}: {error}" for i, error in enumerate(errors) if error is not None]
        logger.warning(f"批量处理中部分生成器出现异常: {'; '.join(error_summary)}")

    ret.clear()
    ret.extend(results)
    return ret

def process_novel(content, novel_name, model, sub_model, max_novel_summary_length, max_thread_num):
    """Summarize a novel end-to-end, yielding progress dicts.

    Pipeline: truncate/validate the input, split it into chapters, then use
    the LLM summary helpers to batch-generate (1) per-chapter plot drafts,
    (2) per-chapter outlines and (3) a whole-book outline, persisting each
    stage to disk.  Every ``yield`` is a dict with at least a
    ``progress_msg`` key; the final yield additionally carries the
    assembled ``outline``/``plot``/``draft`` payloads.

    Args:
        content: full novel text.
        novel_name: base name for the output directories and files.
        model: primary LLM identifier, passed through to the summary helpers.
        sub_model: secondary LLM identifier, passed through likewise.
        max_novel_summary_length: hard cap on processed characters.
        max_thread_num: max generators advanced per batch_yield round.

    Raises:
        Exception: when online-demo limits are exceeded, or when zero
            chapters can be parsed out of ``content``.
    """
    # Enforce online-demo resource limits before doing any work.
    if ENABLE_ONLINE_DEMO:
        if max_novel_summary_length > MAX_NOVEL_SUMMARY_LENGTH:
            raise Exception("在线Demo模型下，最大文档长度不能超过" + str(MAX_NOVEL_SUMMARY_LENGTH) + "个字符！")
        if max_thread_num > MAX_THREAD_NUM:
            raise Exception("在线Demo模型下，最大线程数不能超过" + str(MAX_THREAD_NUM) + "！")

    # Truncate over-long input and give the user a moment to see the notice.
    if len(content) > max_novel_summary_length:
        content = content[:max_novel_summary_length]
        yield {"progress_msg": f"文档长度超出最大处理长度，已截断，只处理前{max_novel_summary_length}个字符。"}
        time.sleep(1)

    yield {"progress_msg": "开始中..."}

    # Create the output directories and check whether result files already exist.
    upload_dir, draft_folder, plot_folder, chapters_folder, split_folder = create_directories(novel_name)

    file_check_msg, draft_file, plot_file, chapters_file = check_existing_files(upload_dir, novel_name)
    if file_check_msg:
        yield {"progress_msg": "注意: " + ", ".join(file_check_msg)}
    else:
        yield {"progress_msg": f"已创建保存目录: {upload_dir}"}

    # Parse chapters and persist each one as its own split file.
    yield {"progress_msg": "正在解析章节..."}
    chapter_titles, chapter_contents = parse_chapters(content)
    save_chapter_files(split_folder, chapter_titles, chapter_contents)

    if len(chapter_titles) == 0:
        raise Exception("解析出章节数为0！！！")

    # ---- Stage 1: per-chapter plot drafts ---------------------------------
    yield {"progress_msg": "正在生成剧情摘要..."}
    dw_list = []
    # One LLM request generator per split chapter file, in sorted filename order.
    files_name = [os.path.join(split_folder, file) for file in os.listdir(split_folder)]
    gens = [new_summary_draft(model, sub_model, file_name) for file_name in sorted(files_name)]

    # Drain the generators in batches, reporting aggregate progress per round.
    for yields in batch_yield(gens, ret=dw_list, max_co_num=max_thread_num):
        chars_num = sum([e['chars_num'] for e in yields if e is not None])
        current_cost = sum([e['current_cost'] for e in yields if e is not None])
        currency_symbol = next((e['currency_symbol'] for e in yields if e is not None), "Unknown Currency")
        model_text = next((e['model'] for e in yields if e is not None), "Unknown Model")
        yield {"progress_msg": f"正在生成剧情摘要 进度：{sum([1 for e in yields if e is not None])} / {len(yields)} 模型：{model_text} 已生成字符：{chars_num} 已花费：{current_cost:.4f}{currency_symbol}"}

    # Persist the drafts, skipping chapters whose generation failed (None).
    # (Loop variable renamed from `content` so it no longer shadows the parameter.)
    valid_dw_list = [dw for dw in dw_list if dw is not None]
    valid_chapter_titles = [title for title, dw in zip(chapter_titles, dw_list) if dw is not None]
    for i, (chapter_title, draft_content) in enumerate(zip(valid_chapter_titles, valid_dw_list), 1):
        if draft_content is not None and hasattr(draft_content, 'x'):
            save_sole_draft_file(draft_folder, chapter_title, draft_content.x, i)
        else:
            logger.warning(f"第{i}章 {chapter_title} 剧情摘要内容为空或格式错误，跳过保存")
    saved_draft_file = save_draft_file(draft_file, valid_chapter_titles, valid_dw_list)
    yield {"progress_msg": f"剧情摘要已保存至 {saved_draft_file}"}

    # ---- Stage 2: per-chapter outlines ------------------------------------
    yield {"progress_msg": "正在生成章节大纲..."}
    cw_list = []
    # Rebuild the filtered title/draft lists, logging each skipped chapter.
    valid_chapter_titles = []
    valid_dw_list = []
    for idx, (title, dw) in enumerate(zip(chapter_titles, dw_list)):
        if dw is not None:
            valid_chapter_titles.append(title)
            valid_dw_list.append(dw)
        else:
            logger.warning(f"第{idx+1}章剧情摘要生成失败，跳过该章节")
    # One outline-request generator per surviving chapter.
    gens = [summary_plot(model, sub_model, ' '.join(title), dw.x) for title, dw in zip(valid_chapter_titles, valid_dw_list)]
    for yields in batch_yield(gens, ret=cw_list, max_co_num=max_thread_num):
        chars_num = sum([e['chars_num'] for e in yields if e is not None])
        current_cost = sum([e['current_cost'] for e in yields if e is not None])
        # Defaults keep next() from raising StopIteration when every slot is
        # None; inside this generator a bare StopIteration would surface as a
        # RuntimeError (PEP 479), matching the other stages' style.
        currency_symbol = next((e['currency_symbol'] for e in yields if e is not None), "Unknown Currency")
        model_text = next((e['model'] for e in yields if e is not None), "Unknown Model")
        yield {"progress_msg": f"正在生成章节大纲 进度：{sum([1 for e in yields if e is not None])} / {len(yields)} 模型：{model_text} 已生成字符：{chars_num} 已花费：{current_cost:.4f}{currency_symbol}"}

    # cw_list aligns with the FILTERED titles, so save with those; passing the
    # full chapter_titles (as before) shifted rows after any draft failure.
    saved_plot_file = save_plot_file(plot_file, valid_chapter_titles, cw_list)
    yield {"progress_msg": f"章节大纲已保存至 {saved_plot_file}"}

    # ---- Stage 3: whole-book outline --------------------------------------
    yield {"progress_msg": "正在生成全书大纲..."}
    ow_list = []
    # Drop failed outline slots before extracting their chapter contexts.
    valid_cw_list = [cw for cw in cw_list if cw is not None]
    # NOTE(review): chapter_titles is the unfiltered list while the contexts
    # come from valid_cw_list — confirm summary_chapters tolerates the length
    # mismatch when some chapters failed upstream.
    gens = [summary_chapters(model, sub_model, novel_name, chapter_titles, [cw.global_context['chapter'] for cw in valid_cw_list])]
    for yields in batch_yield(gens, ret=ow_list, max_co_num=max_thread_num):
        chars_num = sum([e['chars_num'] for e in yields if e is not None])
        current_cost = sum([e['current_cost'] for e in yields if e is not None])
        currency_symbol = next((e['currency_symbol'] for e in yields if e is not None), "Unknown Currency")
        model_text = next((e['model'] for e in yields if e is not None), "Unknown Model")
        yield {"progress_msg": f"正在生成全书大纲 模型：{model_text} 已生成字符：{chars_num} 已花费：{current_cost:.4f}{currency_symbol}"}

    # Exactly one generator was submitted, so ow_list has a single slot
    # (None if that generator failed — downstream code guards for that).
    outline = ow_list[0]
    saved_chapters_file = save_chapters_file(chapters_file, outline)
    yield {"progress_msg": f"全书大纲已保存至 {saved_chapters_file}"}

    # ---- Assemble the final payload ---------------------------------------
    # Re-inflate cw_list back to full chapter length: it was generated over
    # the filtered chapters, so zipping it directly against chapter_titles /
    # dw_list (as before) mis-paired outlines after any draft failure.
    cw_iter = iter(cw_list)
    aligned_cw_list = [next(cw_iter, None) if dw is not None else None for dw in dw_list]

    plot_data = {}
    draft_data = {}
    if outline is not None:
        # NOTE(review): assumes outline.xy_pairs aligns 1:1 with the full
        # chapter_titles list — confirm against summary_chapters' output.
        for title, chapter_outline, cw, dw in zip(chapter_titles, [e[1] for e in outline.xy_pairs], aligned_cw_list, dw_list):
            # Skip chapters where either stage failed.
            if cw is None or dw is None:
                continue
            chapter_name = ' '.join(title)
            plot_data[chapter_name] = {
                'chunks': [('', e) for e, _ in dw.xy_pairs],
                # Use the outline entry rather than cw.global_context['chapter'],
                # because the latter does not include the chapter name.
                'context': chapter_outline
            }
            draft_data[chapter_name] = {
                'chunks': dw.xy_pairs,
                'context': ''  # drafts carry no global context
            }
    else:
        logger.warning("outline为None，跳过处理")

    final_response = {
        "progress_msg": "处理完成！",
        "outline": {
            "chunks": outline.xy_pairs if outline is not None else [],
            "context": outline.global_context['outline'] if outline is not None else ''
        },
        "plot": plot_data,
        "draft": draft_data
    }

    yield final_response
