from httpx._transports import default
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_text_splitters import CharacterTextSplitter
from langchain.text_splitter import MarkdownTextSplitter

class ChunkService:
    """Split loaded documents into chunks using a configurable text splitter."""

    def __init__(self):
        pass

    def chunk_docs(self, docs: list[Document], chunk_type: str, chunk_size: int, chunk_overlap: int):
        """Split *docs* into chunks according to *chunk_type*.

        Args:
            docs: Documents produced by the load step. A document whose
                metadata contains an "error" key marks a failed load.
            chunk_type: Splitter selector; one of
                'character_text_splitter',
                'recursive_character_text_Splitter',
                'markdown_text_Splitter'.
            chunk_size: Target number of characters per chunk.
            chunk_overlap: Number of characters shared between adjacent chunks.

        Returns:
            list[Document]: The resulting chunks, or *docs* unchanged when it
            is empty or the load step reported an error.

        Raises:
            ValueError: If *chunk_type* is not one of the supported values.
        """
        # Pass through unchanged on empty input or a load-step error marker.
        if not docs or docs[0].metadata.get("error"):
            return docs

        # 字符文本切分 — plain character splitting on the default separator.
        if chunk_type == 'character_text_splitter':
            text_splitter = CharacterTextSplitter(
                chunk_size=chunk_size,      # 拆分后的文本块所包含的字符个数
                chunk_overlap=chunk_overlap,  # 块与块之间重叠的字符个数
            )
            chunks = text_splitter.split_documents(docs)

        # 递归字符文本切分 — try each separator in priority order.
        elif chunk_type == 'recursive_character_text_Splitter':
            separators = ["\n\n", "。", "；", "，"]
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=separators,
            )
            # BUG FIX: the original computed chunks here but never returned
            # them, so this branch always yielded None.
            chunks = text_splitter.split_documents(docs)

        # Markdown-aware splitting of load-produced .md content.
        elif chunk_type == 'markdown_text_Splitter':
            splitter = MarkdownTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            )
            # Split every document's content (the original silently dropped
            # everything after docs[0]); identical result for one document.
            chunks = splitter.create_documents([doc.page_content for doc in docs])

        else:
            # Fail loudly instead of silently returning None.
            raise ValueError(f"Unknown chunk_type: {chunk_type!r}")

        self._print_chunks(chunks)
        return chunks

    @staticmethod
    def _print_chunks(chunks: list[Document]) -> None:
        """Debug-print each chunk's content and metadata (shared by all branches)."""
        print("\n=== 文档分块结果 ===")
        for i, chunk in enumerate(chunks, 1):
            print(f"\n--- 第 {i} 个文档块 ---")
            print(f"内容: {chunk.page_content}")
            print(f"元数据: {chunk.metadata}")
            print("-" * 50)
