import json

from langchain_text_splitters import RecursiveCharacterTextSplitter


def chunking(filepath, return_metadata=False):
    """Split debate transcripts from a JSON file into overlapping text chunks.

    Parameters
    ----------
    filepath : str
        Path to a JSON file holding a list of debate entries. Each entry is
        expected to have "competition", "topic" and a "debate" list whose
        items carry "stance", "debater" and "utterance" fields (as read by
        the code below).
    return_metadata : bool, optional
        When True, return ``(chunks, metadata)`` so the per-chunk metadata
        built here is actually usable; default False keeps the original
        return shape for existing callers.

    Returns
    -------
    list[str] or tuple[list[str], list[dict]]
        The text chunks, and optionally one metadata dict per chunk.
    """
    with open(filepath, "r", encoding="utf-8") as f:
        data = json.load(f)  # load the JSON document

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=300,
        chunk_overlap=50,
        # newline plus CJK sentence-ending punctuation
        separators=["\n", "。", "！", "？"]
    )

    chunks = []    # split text chunks
    metadata = []  # one metadata dict per chunk

    # Walk every speech of every debate entry.
    for entry_idx, entry in enumerate(data):
        for speech_idx, speech in enumerate(entry["debate"]):
            # Base metadata shared by all chunks of this speech.
            base_meta = {
                "competition": entry["competition"],  # competition name
                "topic": entry["topic"],              # debate topic
                "stance": speech["stance"],           # debater's stance
                "debater": speech["debater"]          # debater's name
            }

            # Split the (potentially long) utterance into smaller chunks.
            split_texts = splitter.split_text(speech["utterance"])

            for i, text in enumerate(split_texts):
                chunks.append(text)
                metadata.append({
                    **base_meta,  # inherit the base metadata
                    # Entry/speech indices make the id globally unique even
                    # when the same debater speaks more than once.
                    "chunk_id": f"{entry_idx}_{speech_idx}_{speech['debater']}_{i}",
                    "word_count": len(text)  # character count of this chunk
                })

    if return_metadata:
        return chunks, metadata
    return chunks
