import os
import sys
from pathlib import Path
from typing import Dict, List, Optional

from langchain.text_splitter import MarkdownHeaderTextSplitter, RecursiveCharacterTextSplitter

class DocumentLoader:
    """Load Markdown/text documents and split them into retrieval-ready chunks.

    Documents that look like API references (detected from the file path or
    from API-related keywords in the content) are split so that each API
    method becomes one self-contained chunk with its code samples attached;
    all other documents are split by Markdown header and then by length.
    """

    def __init__(self):
        # Split along the heading structure first (h1..h4 captured as metadata).
        self.header_splitter = MarkdownHeaderTextSplitter(
            headers_to_split_on=[
                ("#", "h1"),
                ("##", "h2"),
                ("###", "h3"),
                ("####", "h4"),
            ]
        )

        # Length-based splitter for sections that exceed chunk_size.
        self.chunk_size = 2000
        self.chunk_overlap = 200
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            separators=["\n\n", "\n", " ", ""],
            keep_separator=True,
        )

    def process_markdown_content(self, content: str, file_path: str) -> List[str]:
        """Split Markdown *content* into chunks.

        API documents and regular documents use different strategies.

        Args:
            content: Raw Markdown text of one file.
            file_path: Path of the file; used for API-doc detection and
                embedded into every chunk as metadata.

        Returns:
            List of text chunks. If processing fails for any reason, the
            original content is returned unchanged as a single chunk.
        """
        try:
            # Heuristic API detection: path mentions "api", or the body
            # contains known API keywords. Lower-case the content once
            # instead of once per keyword.
            lowered = content.lower()
            is_api_doc = "api" in file_path.lower() or any(
                keyword in lowered for keyword in ("uni.$as", "this.$as", "jsapi")
            )

            if is_api_doc:
                return self._process_api_doc(content, file_path)
            return self._process_regular_doc(content, file_path)

        except Exception as e:
            print(f"处理文档时发生错误: {str(e)}")
            return [content]  # Fall back to the unsplit content on failure.

    def _process_api_doc(self, content: str, file_path: str) -> List[str]:
        """Split an API document so each API method is an independent chunk.

        Code blocks and descriptions belonging to a method stay inside that
        method's chunk; content preceding the first method heading (module
        intro etc.) becomes its own chunk.
        """
        # 1. Split along Markdown headers first.
        splits = self.header_splitter.split_text(content)

        processed_texts = []
        current_api = None
        api_content = []

        for split in splits:
            metadata = split.metadata
            text = split.page_content.strip()

            # Method-level sections are assumed to be h3 headings.
            # NOTE(review): with langchain's default strip_headers=True the
            # page_content no longer contains the literal "### " marker, so
            # the `'###' in text` guard may never fire — confirm against the
            # installed langchain version before relying on this branch.
            if metadata.get('h3') and '###' in text:
                # Flush the previously accumulated API section, if any.
                if current_api and api_content:
                    api_text = self._format_api_content(api_content)
                    processed_texts.append(api_text)

                # Start collecting a new API section.
                current_api = text.replace('### ', '').strip()
                api_content = [f"API方法: {current_api}"]

            # Body text that belongs to the current API section.
            elif current_api:
                api_content.append(text)

            # Anything before the first method heading.
            else:
                if text.strip():
                    processed_texts.append(text)

        # Flush the final API section.
        if current_api and api_content:
            api_text = self._format_api_content(api_content)
            processed_texts.append(api_text)

        # Prefix every chunk with file-path metadata.
        processed_texts = [f"文件路径: {file_path}\n类型: API文档\n\n{text}" for text in processed_texts]

        return processed_texts

    def _format_api_content(self, content_list: List[str]) -> str:
        """Join the pieces of one API section, labelling fenced code blocks.

        NOTE(review): items of *content_list* may themselves be multi-line
        section texts, in which case the per-item "```" scan below treats a
        whole section as a single line — confirm this matches the data the
        header splitter actually produces.
        """
        formatted_parts = []
        current_part = []
        in_code_block = False

        for line in content_list:
            # A fence marker toggles code-block state.
            if "```" in line:
                if not in_code_block:
                    # Opening fence; capture the language tag if present.
                    in_code_block = True
                    code_lang = line.replace("```", "").strip()
                    if code_lang:
                        current_part.append(f"代码示例 ({code_lang}):")
                    else:
                        current_part.append("代码示例:")
                    continue
                # Closing fence: emit the accumulated part and reset.
                in_code_block = False
                if current_part:
                    formatted_parts.append("\n".join(current_part))
                    current_part = []
                continue

            if in_code_block:
                # Keep code lines verbatim.
                current_part.append(line)
            elif line.strip():
                # Descriptive text; a "说明：/参数说明：" header starts a new part.
                if line.startswith(("说明：", "参数说明：")):
                    if current_part:
                        formatted_parts.append("\n".join(current_part))
                        current_part = []
                current_part.append(line)

        # Emit whatever is left.
        if current_part:
            formatted_parts.append("\n".join(current_part))

        # Separate the parts with blank lines.
        return "\n\n".join(formatted_parts)

    def _process_regular_doc(self, content: str, file_path: str) -> List[str]:
        """Split a regular document by header, then by length where needed."""
        # 1. Split along Markdown headers first.
        splits = self.header_splitter.split_text(content)

        processed_texts = []

        for split in splits:
            metadata = split.metadata
            text = split.page_content.strip()

            # First header value found in this section's metadata (h1..h4).
            level = next((v for k, v in metadata.items() if k.startswith('h')), None)

            # Over-long sections get a second, length-based split.
            if len(text) > self.chunk_size:
                for sub_text in self.text_splitter.split_text(text):
                    if level:
                        processed_texts.append(f"标题: {level}\n\n{sub_text}")
                    else:
                        processed_texts.append(sub_text)
            elif level:
                processed_texts.append(f"标题: {level}\n\n{text}")
            else:
                processed_texts.append(text)

        # Prefix every chunk with file-path metadata.
        processed_texts = [f"文件路径: {file_path}\n\n{text}" for text in processed_texts]

        return processed_texts

    def load_directory(self, directory: str, supported_extensions: Optional[List[str]] = None) -> Dict[str, List[str]]:
        """Recursively load every supported document file under *directory*.

        Args:
            directory: Root directory to walk.
            supported_extensions: Accepted file extensions (defaults to
                ['.md', '.txt']); matching is case-insensitive.

        Returns:
            Mapping of relative file path -> list of processed text chunks.
        """
        if supported_extensions is None:
            supported_extensions = ['.md', '.txt']

        # Hoist the lower-cased extension tuple out of the walk loop so one
        # endswith call covers all extensions, case-insensitively.
        ext_tuple = tuple(ext.lower() for ext in supported_extensions)

        documents = {}
        total_chunks = 0

        # Recursively walk the directory tree.
        for root, _, files in os.walk(directory):
            for filename in files:
                if not filename.lower().endswith(ext_tuple):
                    continue
                full_path = os.path.join(root, filename)
                relative_path = os.path.relpath(full_path, directory)

                try:
                    # Read the file content (with encoding fallback).
                    content = self._read_file_with_fallback_encoding(full_path)
                    if not content:
                        continue

                    # Process the Markdown content.
                    processed_texts = self.process_markdown_content(content, relative_path)

                    if processed_texts:
                        documents[relative_path] = processed_texts
                        total_chunks += len(processed_texts)
                        print(f"\n成功加载文档: {relative_path} (包含 {len(processed_texts)} 个文本块)")
                        # Preview each chunk: the text up to the first blank
                        # line (i.e. its metadata prefix).
                        for i, text in enumerate(processed_texts, 1):
                            preview = text.split('\n\n')[0]
                            print(f"  块 {i}: {preview}")

                except Exception as e:
                    print(f"加载文档失败 {relative_path}: {str(e)}")
                    continue

        print(f"\n总共加载了 {len(documents)} 个文档，{total_chunks} 个文本块")
        return documents

    def _read_file_with_fallback_encoding(self, file_path: str) -> str:
        """Read *file_path* as text, trying several encodings in order.

        Tries UTF-8 first, then common Chinese encodings, then the platform
        filesystem encoding. Returns "" on undecodable content or I/O error.
        """
        # dict.fromkeys de-duplicates while keeping order: the filesystem
        # encoding is frequently 'utf-8' already, and retrying an encoding
        # that has just failed would be pointless.
        encodings = list(dict.fromkeys(
            ['utf-8', 'gbk', 'gb2312', 'gb18030', sys.getfilesystemencoding()]
        ))

        for encoding in encodings:
            try:
                with open(file_path, 'r', encoding=encoding) as f:
                    content = f.read()
                if encoding != 'utf-8':
                    print(f"使用 {encoding} 编码成功读取: {file_path}")
                return content
            except UnicodeDecodeError:
                # Wrong encoding — try the next candidate.
                continue
            except Exception as e:
                # Non-decoding I/O failure: report and give up immediately.
                print(f"读取文件失败 {file_path}: {str(e)}")
                return ""

        print(f"所有编码尝试失败: {file_path}")
        return ""