from langchain.text_splitter import RecursiveCharacterTextSplitter
from large_model.model_tool.case_model import FunctionalTestCaseGenerator, FunctionalTestCaseReviewer


# Document chunking
def split_large_doc(documents, chunk_size=2000, overlap=400):
    """Split loaded documents into overlapping text chunks.

    Args:
        documents: LangChain ``Document`` objects to split.
        chunk_size: Target maximum size of each chunk, in characters.
        overlap: Number of characters shared between adjacent chunks.

    Returns:
        A list of plain-text chunk strings (``page_content`` of each piece).
    """
    # Prefer paragraph/line breaks, then CJK sentence terminators, as split points.
    split_config = {
        "chunk_size": chunk_size,
        "chunk_overlap": overlap,
        "separators": ["\n\n", "\n", "。", "！", "？"],
    }
    splitter = RecursiveCharacterTextSplitter(**split_config)
    pieces = splitter.split_documents(documents)
    return [piece.page_content for piece in pieces]


#   3. Generate test cases per chunk (Map phase of Map-Reduce)
def generate_per_chunk(chunks):
    """Run the test-case generator over each text chunk independently.

    Args:
        chunks: List of plain-text requirement chunks.

    Returns:
        A list with one generated-test-cases result per chunk, in order.
    """
    generator = FunctionalTestCaseGenerator()
    total = len(chunks)
    cases = []
    for index, text in enumerate(chunks, 1):
        # Progress indicator for long-running documents.
        print(f"📄 处理第 {index}/{total} 块...")
        # NOTE(review): relies on the tool's private _run; confirm there is no public entry point.
        cases.append(generator._run(requirements_document=text))
    return cases


#  4. Merge + dedupe + review (Reduce phase)
def merge_and_review(all_cases, requirements_document="完整需求文档（或摘要）"):
    """Merge per-chunk test cases and run a single unified review pass.

    Args:
        all_cases: List of per-chunk generated test-case strings.
        requirements_document: The requirements text (or a summary) given to
            the reviewer for context. Previously this was a hard-coded
            placeholder string; callers should now pass the real document so
            the review is grounded in actual requirements. The old placeholder
            remains the default for backward compatibility.

    Returns:
        The reviewer's final, consolidated test cases.
    """
    # Concatenate all intermediate cases into one document for review.
    combined = "\n".join(all_cases)

    # Single unified review over the merged cases.
    reviewer = FunctionalTestCaseReviewer()
    final = reviewer._run(
        requirements_document=requirements_document,
        initial_test_cases=combined,
    )
    return final


def handle_large_doc(doc_path):
    """End-to-end pipeline: load a large document, chunk it, generate test
    cases per chunk, then merge and review them into a final result.

    Args:
        doc_path: Filesystem path of the requirements document to process.

    Returns:
        The final reviewed test cases produced by ``merge_and_review``.
    """
    # Imported lazily so the heavy unstructured dependency is only loaded when needed.
    from langchain_community.document_loaders import UnstructuredFileLoader

    # Load the file as structural elements (paragraphs, titles, tables, ...).
    documents = UnstructuredFileLoader(doc_path, mode="elements").load()

    # Map: split and generate cases chunk by chunk.
    per_chunk_cases = generate_per_chunk(
        split_large_doc(documents, chunk_size=2000)
    )

    # Reduce: merge everything and run one unified review.
    return merge_and_review(per_chunk_cases)

