# -*- coding: utf-8 -*-
# @Time: 2025/6/29 21:55
# @Author: wzd
# @Email: 2146333089@qq.com
# @File: kmowledgeDBCreate.py

import pandas as pd
from langchain_text_splitters import RecursiveCharacterTextSplitter


def create_chunks_from_unified_data(parquet_path):
    """Create vector-store chunks from the unified parquet store.

    Args:
        parquet_path: Path to a parquet file whose rows carry at least the
            columns ``content``, ``source_file``, ``section_id``, ``title``
            and ``level`` (schema inferred from the fields read below —
            confirm against the writer of this parquet file).

    Returns:
        A list of langchain ``Document`` chunks; each chunk keeps its
        section's provenance in ``metadata``.
    """
    df = pd.read_parquet(parquet_path)

    # Chunking tuned for Chinese text: prefer paragraph breaks, then CJK
    # sentence terminators (。！？) and clause separators (；，) before
    # falling back to newlines/spaces.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        separators=["\n\n", "。", "！", "？", "\n", "；", "，", " "]
    )

    # Build aligned texts/metadatas once and make a single batched
    # create_documents call instead of one call per row — same chunks in
    # the same order, without the manual extend loop.
    metadatas = [
        {
            "source": row["source_file"],
            "section_id": row["section_id"],
            "title": row["title"],
            "level": row["level"],
        }
        for _, row in df.iterrows()
    ]
    return splitter.create_documents(df["content"].tolist(), metadatas=metadatas)
