from langchain.text_splitter import RecursiveCharacterTextSplitter
from large_model.model_tool.case_model import FunctionalTestCaseGenerator, FunctionalTestCaseReviewer
from langchain.tools import tool
from utils.file_dir import get_doc_md_dir
from langchain_unstructured import UnstructuredLoader
import os
import re
import pdfplumber
import mammoth



def clean_document_to_md(input_path: str, output_path: str) -> str:
    """Convert a PDF or DOCX document into a cleaned Markdown text file.

    Args:
        input_path: Path to the source file; only ``.pdf`` and ``.docx``
            extensions are supported.
        output_path: Path where the cleaned Markdown text is written (UTF-8).

    Returns:
        The ``output_path`` that was written, for convenient chaining.

    Raises:
        ValueError: If the input extension is neither ``.pdf`` nor ``.docx``.
    """
    ext = os.path.splitext(input_path)[1].lower()

    if ext == ".pdf":
        # BUG FIX: the original `text += page.extract_text() or "" + "\n\n"`
        # parsed as `extract_text() or ("" + "\n\n")` because `or` binds looser
        # than `+`, so the page separator was dropped whenever extraction
        # succeeded. Collect pages and join explicitly instead.
        pages = []
        with pdfplumber.open(input_path) as pdf:
            for page in pdf.pages:
                # extract_text() may return None for image-only pages.
                pages.append(page.extract_text() or "")
        text = "\n\n".join(pages)
    elif ext == ".docx":
        with open(input_path, "rb") as docx_file:
            text = mammoth.extract_raw_text(docx_file).value
    else:
        raise ValueError("仅支持 .pdf 和 .docx 文件")

    # Strip layout artifacts left behind by the conversion.
    text = re.sub(r'^\s*\d+\s*$', '', text, flags=re.MULTILINE)  # bare page numbers
    text = re.sub(r'^\s*-\s*-\s*-\s*$', '', text, flags=re.MULTILINE)  # "- - -" rules
    text = re.sub(r'\n{3,}', '\n\n', text)  # collapse runs of blank lines
    text = text.strip()

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(text)

    print(f"✅ 已生成清理后的 Markdown：{output_path}")
    return output_path



#  文档分块（Chunking）
def split_large_doc(documents, chunk_size=1000, overlap=400):
    """Split LangChain documents into overlapping plain-text chunks.

    Args:
        documents: Iterable of LangChain ``Document`` objects.
        chunk_size: Maximum number of characters per chunk.
        overlap: Number of characters shared between consecutive chunks.

    Returns:
        list[str]: The raw text content of every chunk, in order.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=overlap,
        # Prefer paragraph breaks, then line breaks, then CJK sentence enders.
        separators=["\n\n", "\n", "。", "！", "？"],
    )
    pieces = text_splitter.split_documents(documents)
    contents = []
    for piece in pieces:
        contents.append(piece.page_content)
    return contents


#   3. 逐块生成用例（Map-Reduce 模式）
def generate_per_chunk(chunks):
    """Generate test cases for each requirement chunk (the "map" stage).

    Args:
        chunks: List of plain-text fragments of the requirements document.

    Returns:
        list: One generated test-case result per input chunk, in order.
    """
    generator = FunctionalTestCaseGenerator()
    total = len(chunks)
    generated = []
    for index, fragment in enumerate(chunks, 1):
        print(f"📄 处理第 {index}/{total} 块...")
        generated.append(generator._run(requirements_document=fragment))
    return generated


#  4. 合并 + 去重 + 评审（Reduce 阶段）
def merge_and_review(all_cases, original_content: str):
    """Merge per-chunk cases and review them against the full document
    (the "reduce" stage).

    Args:
        all_cases: Per-chunk test-case strings to be joined and reviewed.
        original_content: Full cleaned requirements text, used to ground the
            review so unrelated features are not introduced.

    Returns:
        The reviewer's final, consolidated test-case output.
    """
    merged_cases = "\n".join(all_cases)
    review_prompt = (
        "请基于以下真实需求文档内容评审测试用例，禁止引入与文档无关的功能模块：\n\n"
        f"{original_content}"
    )
    return FunctionalTestCaseReviewer()._run(
        requirements_document=review_prompt,
        initial_test_cases=merged_cases,
    )


@tool
def smart_functional_test_agent_runner(doc_path: str) -> str:
    """
    根据任意大小需求文档，智能生成功能测试用例（支持大文档分块处理）
    参数:
        doc_path: 本地文件路径（支持 .pdf / .docx）
    返回:
        完整的测试用例文本
    """
    # NOTE: the docstring above doubles as the tool description exposed to the
    # agent by @tool, so it is kept verbatim.

    # Step 1: produce a cleaned Markdown copy next to the source document.
    # FIX: the unused `ext` from the original tuple-unpack is dropped.
    base = os.path.splitext(doc_path)[0]
    clean_md_path = base + "_clean.md"

    # Reuse an existing cleaned file so repeated runs skip the conversion.
    if not os.path.exists(clean_md_path):
        clean_document_to_md(doc_path, clean_md_path)

    # Step 2: load the Markdown as individual elements and flatten to text.
    loader = UnstructuredLoader(clean_md_path, mode="elements")
    docs = loader.load()
    content = "\n".join(doc.page_content for doc in docs).strip()

    print(f"📄 清理后文档总字符数：{len(content)}")
    print("🔍 内容预览：")
    # FIX: the original printed the entire document under a "preview" banner,
    # flooding the console for large files; bound the preview instead.
    print(content[:500])

    total_chars = len(content)

    # Step 3: pick a generation strategy based on document size.
    if total_chars > 4000:
        print("📦 检测到【大文档】，启用分块策略")
        # Imported lazily: only the large-document path needs Document.
        from langchain.schema import Document
        chunks = split_large_doc([Document(page_content=content)])
        intermediate = generate_per_chunk(chunks)
        final = merge_and_review(intermediate, content)
    else:
        print("📄 检测到【小文档】，直接生成")
        initial = FunctionalTestCaseGenerator()._run(
            requirements_document=f"请基于以下真实需求文档内容生成功能测试用例，禁止杜撰无关内容：\n\n{content}"
        )
        final = FunctionalTestCaseReviewer()._run(
            requirements_document=f"请基于以下真实需求文档内容评审测试用例，禁止引入与文档无关的功能模块：\n\n{content}",
            initial_test_cases=initial
        )
    return final


# Staged debugging entry point for the tool pipeline.
if __name__ == "__main__":
    # Tool registry — not referenced below; presumably kept for wiring into an
    # agent executor later. TODO confirm or remove.
    tools = [
        smart_functional_test_agent_runner
    ]

    # NOTE(review): calling a @tool-decorated function directly is deprecated
    # (and removed in recent LangChain releases) — `.invoke({...})` may be
    # required depending on the installed version; verify at runtime.
    content = smart_functional_test_agent_runner(get_doc_md_dir("王者aipass问答.docx"))
    print(content)
