from langchain_text_splitters import (
    RecursiveCharacterTextSplitter,
    MarkdownTextSplitter,
    PythonCodeTextSplitter,
    Language
)
from langchain.schema import Document
import os
from typing import List, Dict, Optional


class CodeSplitter:
    """Selects and applies a language-appropriate text splitter for code files."""

    # Extension -> langchain Language enum, used for language-aware splitting.
    # Adding support for a new code language only requires a new entry here.
    _LANGUAGE_BY_EXTENSION = {
        ".py": Language.PYTHON,
        ".js": Language.JS,
        # NOTE(review): .ts is mapped to Language.JS as in the original code;
        # Language.TS may be a closer fit if the installed version provides it — confirm.
        ".ts": Language.JS,
        ".java": Language.JAVA,
    }

    @staticmethod
    def create_splitter(file_extension: str, **kwargs):
        """Create a text splitter suited to the given file type.

        Args:
            file_extension: File extension including the leading dot (e.g. ".py").
            **kwargs: Splitter options; these override the defaults below.

        Returns:
            A configured splitter: a language-aware RecursiveCharacterTextSplitter
            for known code extensions, a MarkdownTextSplitter for ".md", and a
            plain RecursiveCharacterTextSplitter otherwise.
        """
        # Caller-supplied kwargs take precedence over the defaults.
        merged_kwargs = {"chunk_size": 1500, "chunk_overlap": 150, **kwargs}

        language = CodeSplitter._LANGUAGE_BY_EXTENSION.get(file_extension)
        if language is not None:
            return RecursiveCharacterTextSplitter.from_language(
                language=language,
                **merged_kwargs,
            )
        if file_extension == ".md":
            return MarkdownTextSplitter(**merged_kwargs)
        # Fall back to the generic recursive splitter for unknown file types.
        return RecursiveCharacterTextSplitter(**merged_kwargs)

    @staticmethod
    def split_documents(documents: Dict[str, List[Document]], **kwargs) -> Dict[str, List[Document]]:
        """Split each file's documents into smaller chunks with a per-file splitter.

        Args:
            documents: Mapping of file path to the list of documents loaded
                from that file.
            **kwargs: Splitter options forwarded to ``create_splitter``.

        Returns:
            Mapping of file path to the list of chunked documents. Each chunk
            keeps the original document's metadata plus ``chunk_id`` (index
            within its source document), ``total_chunks``, and ``source_file``.
        """
        result: Dict[str, List[Document]] = {}

        for file_path, docs in documents.items():
            # Pick a splitter based on the file's extension.
            _, ext = os.path.splitext(file_path)
            splitter = CodeSplitter.create_splitter(ext, **kwargs)

            split_docs: List[Document] = []
            for doc in docs:
                chunks = splitter.split_text(doc.page_content)
                total = len(chunks)  # hoisted: invariant over the inner loop
                for i, chunk in enumerate(chunks):
                    # Carry the original metadata forward, plus chunk bookkeeping.
                    split_docs.append(Document(
                        page_content=chunk,
                        metadata={
                            **doc.metadata,
                            "chunk_id": i,
                            "total_chunks": total,
                            "source_file": file_path,
                        },
                    ))

            result[file_path] = split_docs

        return result