"""
文档结构分块方法演示
演示基于文档结构的文本分块方法，包括Markdown、HTML、代码等
"""
import re
import json
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass

# 尝试导入BeautifulSoup，如果不可用则使用正则表达式
try:
    from bs4 import BeautifulSoup
    BEAUTIFULSOUP_AVAILABLE = True
except ImportError:
    BEAUTIFULSOUP_AVAILABLE = False

# 使用通用导入工具
try:
    from .import_utils import get_basic_chunker_classes
    BasicChunker, Chunk = get_basic_chunker_classes()
except ImportError:
    # 如果导入工具不可用，使用备用方法
    try:
        import sys
        import os
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        from chunk_demo01_basic_chunking import BasicChunker, Chunk
    except ImportError:
        # 最后的直接导入尝试
        from basic_chunking import BasicChunker, Chunk


class DocumentStructureChunker(BasicChunker):
    """Abstract base for structure-aware document chunkers.

    Subclasses parse a document's structural elements (headers, sections,
    code blocks, ...) and then split the text along those boundaries.
    """

    def __init__(self):
        super().__init__()
        # Structure dict produced by the most recent parse; None until
        # parse_document_structure() has run.
        self.document_structure = None

    def parse_document_structure(self, text: str) -> Dict[str, Any]:
        """Parse the structural elements of *text*.

        Must be overridden by subclasses.
        """
        raise NotImplementedError("子类必须实现此方法")

    def chunk_by_structure(self, text: str, structure: Dict[str, Any]) -> List[Chunk]:
        """Split *text* into chunks guided by a parsed *structure*.

        Must be overridden by subclasses.
        """
        raise NotImplementedError("子类必须实现此方法")


class MarkdownChunker(DocumentStructureChunker):
    """Markdown document chunker.

    Splits a Markdown document along its structural elements: headers
    delimit sections; fenced code blocks and tables are recorded and used
    as a fallback when no sections exist.
    """

    def __init__(self, max_chunk_size: int = 2000,
                 preserve_headers: bool = True,
                 chunk_by_sections: bool = True):
        """
        Args:
            max_chunk_size: maximum characters per chunk; oversized
                sections are split on blank-line paragraph boundaries.
            preserve_headers: prepend the section's header line to each
                section chunk.
            chunk_by_sections: chunk by header-delimited sections; when
                False (or the document has no headers) fall back to
                code-block / table chunks.
        """
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.preserve_headers = preserve_headers
        self.chunk_by_sections = chunk_by_sections

    def parse_document_structure(self, text: str) -> Dict[str, Any]:
        """Parse headers, sections, fenced code blocks, tables and lists.

        Fix over the previous version: lines *inside* a fenced code block
        are no longer scanned for headers/tables/lists, so e.g. a Python
        ``# comment`` line inside a fence can no longer split a section.
        """
        lines = text.split('\n')
        structure = {
            'headers': [],
            'sections': [],
            'code_blocks': [],
            'tables': [],
            'lists': []
        }

        current_section = None
        in_code_block = False
        code_block_content = []
        code_block_start = 0

        for i, line in enumerate(lines):
            header_match = None

            if line.strip().startswith('```'):
                # Fence line: toggle code-block state.
                if not in_code_block:
                    in_code_block = True
                    code_block_content = []
                    code_block_start = i
                else:
                    in_code_block = False
                    structure['code_blocks'].append({
                        'start_line': code_block_start,
                        'end_line': i,
                        'content': '\n'.join(code_block_content),
                        # Text after the opening ``` is the language tag.
                        'language': lines[code_block_start].strip()[3:] or 'unknown'
                    })
                    code_block_content = []
            elif in_code_block:
                code_block_content.append(line)
            else:
                # Only scan for Markdown structure outside code fences.
                header_match = re.match(r'^(#{1,6})\s+(.+)', line)
                if header_match:
                    level = len(header_match.group(1))
                    title = header_match.group(2).strip()

                    # Locate the header's character offset, searching after
                    # the previous header so an identical earlier line is
                    # not matched by mistake.
                    search_from = 0
                    if i != 0 and structure['headers']:
                        search_from = structure['headers'][-1]['end_pos']
                    header_info = {
                        'level': level,
                        'title': title,
                        'line_index': i,
                        'start_pos': text.find(line, search_from)
                    }
                    header_info['end_pos'] = header_info['start_pos'] + len(line)
                    structure['headers'].append(header_info)

                    # Close the running section and open a new one.
                    if current_section:
                        current_section['end_line'] = i - 1
                        structure['sections'].append(current_section)
                    current_section = {
                        'header': header_info,
                        'start_line': i,
                        'content': []
                    }
                elif '|' in line and line.strip() and re.match(r'^\|(.+)\|$', line.strip()):
                    # Pipe-delimited table row.
                    structure['tables'].append({
                        'line_index': i,
                        'content': line,
                        'is_table': True
                    })

                if not header_match:
                    # Unordered list item (-, * or + bullet).
                    if re.match(r'^\s*[-*+]\s+(.+)', line):
                        structure['lists'].append({
                            'line_index': i,
                            'content': line,
                            'indent_level': len(line) - len(line.lstrip()),
                            'is_list': True
                        })

            # Every non-header line belongs to the running section's body.
            if current_section and not header_match:
                current_section['content'].append(line)

        # Close the final section, if any.
        if current_section:
            current_section['end_line'] = len(lines) - 1
            structure['sections'].append(current_section)

        self.document_structure = structure
        return structure

    def chunk(self, text: str) -> List[Chunk]:
        """Split a Markdown document into chunks.

        Returns one chunk per section when section chunking is enabled
        and headers exist; otherwise falls back to code-block/table
        chunks via _chunk_by_other_structures().
        """
        self.clear()

        if not text:
            return self.chunks

        structure = self.parse_document_structure(text)

        if self.chunk_by_sections and structure['sections']:
            chunks = []
            for section in structure['sections']:
                section_text = '\n'.join(section['content'])

                if self.preserve_headers and section['header']:
                    header_text = '#' * section['header']['level'] + ' ' + section['header']['title']
                    section_text = header_text + '\n' + section_text

                if len(section_text) > self.max_chunk_size:
                    # Oversized section: split on paragraph boundaries.
                    chunks.extend(self._split_large_section(section_text, section))
                else:
                    start = section['header']['start_pos'] if section['header'] else 0
                    chunks.append(Chunk(
                        text=section_text,
                        start_index=start,
                        end_index=start + len(section_text),
                        chunk_type='markdown_section',
                        metadata={
                            'header_level': section['header']['level'] if section['header'] else None,
                            'header_title': section['header']['title'] if section['header'] else None,
                            'section_lines': section['end_line'] - section['start_line'] + 1
                        }
                    ))

            self.chunks = chunks
            return chunks
        return self._chunk_by_other_structures(text, structure)

    def _split_large_section(self, section_text: str, section: Dict[str, Any]) -> List[Chunk]:
        """Split an oversized section into paragraph-aligned sub-chunks.

        NOTE: start/end indices here are approximate — they advance by
        accumulated sub-chunk length from the section's header position.
        """
        paragraphs = section_text.split('\n\n')
        chunks = []
        current_chunk = ""
        current_start = section['header']['start_pos'] if section['header'] else 0

        for paragraph in paragraphs:
            if len(current_chunk + paragraph) > self.max_chunk_size:
                if current_chunk:
                    chunks.append(Chunk(
                        text=current_chunk,
                        start_index=current_start,
                        end_index=current_start + len(current_chunk),
                        chunk_type='markdown_section',
                        metadata={
                            'is_sub_chunk': True,
                            'header_level': section['header']['level'] if section['header'] else None
                        }
                    ))
                    current_start += len(current_chunk) + 2  # +2 for the \n\n separator
                    current_chunk = ""

            current_chunk += paragraph + "\n\n"

        if current_chunk:
            chunks.append(Chunk(
                text=current_chunk.strip(),
                start_index=current_start,
                end_index=current_start + len(current_chunk.strip()),
                chunk_type='markdown_section',
                metadata={
                    'is_sub_chunk': True,
                    'header_level': section['header']['level'] if section['header'] else None
                }
            ))

        return chunks

    def _chunk_by_other_structures(self, text: str, structure: Dict[str, Any]) -> List[Chunk]:
        """Fallback chunking: emit code blocks and table rows.

        Fix over the previous version: character offsets are derived from
        the recorded line numbers instead of re-searching the text for a
        reconstructed fence string — the old search failed when the fence
        had no language tag (the 'unknown' placeholder never occurs in the
        text) and located the *opening* fence for end_index instead of the
        closing one.
        """
        lines = text.split('\n')
        # line_offsets[i] == character offset at which line i starts.
        line_offsets = [0]
        for line in lines:
            line_offsets.append(line_offsets[-1] + len(line) + 1)

        chunks = []

        for code_block in structure['code_blocks']:
            start_index = line_offsets[code_block['start_line']]
            # End of the closing-fence line (exclusive of the newline).
            end_index = line_offsets[code_block['end_line'] + 1] - 1
            chunks.append(Chunk(
                text=f"```{code_block['language']}\n{code_block['content']}\n```",
                start_index=start_index,
                end_index=end_index,
                chunk_type='markdown_code',
                metadata={
                    'language': code_block['language'],
                    'lines': code_block['end_line'] - code_block['start_line'] + 1
                }
            ))

        for table in structure['tables']:
            start_index = line_offsets[table['line_index']]
            chunks.append(Chunk(
                text=table['content'],
                start_index=start_index,
                end_index=start_index + len(table['content']),
                chunk_type='markdown_table',
                metadata={'is_table': True}
            ))

        self.chunks = chunks
        return chunks


class HTMLChunker(DocumentStructureChunker):
    """HTML document chunker.

    Parses HTML with BeautifulSoup when the library is importable
    (module-level BEAUTIFULSOUP_AVAILABLE flag); otherwise falls back to
    regular-expression extraction. Chunking follows the same split:
    tag-based with BeautifulSoup, paragraph/heading-based with regex.
    """
    
    def __init__(self, max_chunk_size: int = 2000,
                 chunk_by_tags: Optional[List[str]] = None,
                 preserve_structure: bool = True):
        """
        Args:
            max_chunk_size: intended upper bound on chunk size.
                NOTE(review): not referenced anywhere in this class —
                oversized elements are emitted whole; confirm intent.
            chunk_by_tags: tag names to chunk on; defaults to common
                block-level containers plus h1-h6.
            preserve_structure: stored but not read by this class —
                presumably reserved for future use; verify.
        """
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.chunk_by_tags = chunk_by_tags or ['div', 'section', 'article', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']
        self.preserve_structure = preserve_structure
    
    def parse_document_structure(self, text: str) -> Dict[str, Any]:
        """Parse the HTML document's structure.

        Returns a dict with title, headings, paragraphs, sections, lists,
        tables and code_blocks. Uses BeautifulSoup when available,
        otherwise the regex fallback (which fills only title=None,
        headings, paragraphs and code_blocks).
        """
        if not BEAUTIFULSOUP_AVAILABLE:
            # BeautifulSoup missing: degrade to regex-based parsing.
            return self._parse_html_with_regex(text)
        
        soup = BeautifulSoup(text, 'html.parser')
        
        structure = {
            'title': soup.title.string if soup.title else None,
            'headings': [],
            'paragraphs': [],
            'sections': [],
            'lists': [],
            'tables': [],
            'code_blocks': []
        }
        
        # Collect headings h1 through h6 with their level and attributes.
        for level in range(1, 7):
            headings = soup.find_all(f'h{level}')
            for heading in headings:
                structure['headings'].append({
                    'level': level,
                    'text': heading.get_text().strip(),
                    'attributes': dict(heading.attrs)
                })
        
        # Collect paragraphs.
        paragraphs = soup.find_all('p')
        for p in paragraphs:
            structure['paragraphs'].append({
                'text': p.get_text().strip(),
                'attributes': dict(p.attrs)
            })
        
        # Collect section-like containers (section/article/div).
        sections = soup.find_all(['section', 'article', 'div'])
        for section in sections:
            structure['sections'].append({
                'tag': section.name,
                'text': section.get_text().strip(),
                'attributes': dict(section.attrs)
            })
        
        # Collect list containers and items (ul/ol/li).
        lists = soup.find_all(['ul', 'ol', 'li'])
        for lst in lists:
            structure['lists'].append({
                'tag': lst.name,
                'text': lst.get_text().strip(),
                'attributes': dict(lst.attrs)
            })
        
        # Collect tables.
        tables = soup.find_all('table')
        for table in tables:
            structure['tables'].append({
                'text': table.get_text().strip(),
                'attributes': dict(table.attrs)
            })
        
        # Collect code blocks (code and pre elements).
        code_blocks = soup.find_all(['code', 'pre'])
        for code in code_blocks:
            structure['code_blocks'].append({
                'tag': code.name,
                'text': code.get_text().strip(),
                'attributes': dict(code.attrs)
            })
        
        self.document_structure = structure
        return structure
    
    def _parse_html_with_regex(self, text: str) -> Dict[str, Any]:
        """Regex fallback parser used when BeautifulSoup is unavailable.

        Only headings, paragraphs and code_blocks are populated; the
        other structure keys stay empty, and attributes are always {}.
        """
        structure = {
            'title': None,
            'headings': [],
            'paragraphs': [],
            'sections': [],
            'lists': [],
            'tables': [],
            'code_blocks': []
        }
        
        # Extract headings h1-h6 (inner HTML kept verbatim; DOTALL so
        # multi-line headings are matched).
        for level in range(1, 7):
            heading_pattern = f'<h{level}[^>]*>(.*?)</h{level}>'
            headings = re.findall(heading_pattern, text, re.DOTALL)
            for heading in headings:
                structure['headings'].append({
                    'level': level,
                    'text': heading.strip(),
                    'attributes': {}
                })
        
        # Extract paragraphs.
        paragraph_pattern = r'<p[^>]*>(.*?)</p>'
        paragraphs = re.findall(paragraph_pattern, text, re.DOTALL)
        for p in paragraphs:
            structure['paragraphs'].append({
                'text': p.strip(),
                'attributes': {}
            })
        
        # Extract code blocks; the back-reference \1 forces the closing
        # tag to match the opening one (code vs pre).
        code_pattern = r'<(code|pre)[^>]*>(.*?)</\1>'
        code_blocks = re.findall(code_pattern, text, re.DOTALL)
        for tag, code in code_blocks:
            structure['code_blocks'].append({
                'tag': tag,
                'text': code.strip(),
                'attributes': {}
            })
        
        return structure
    
    def chunk(self, text: str) -> List[Chunk]:
        """Split an HTML document into chunks.

        Dispatches to the BeautifulSoup or regex implementation based on
        library availability; both assign self.chunks and return it.
        """
        self.clear()
        
        if not text:
            return self.chunks
        
        # Parse document structure first (also sets self.document_structure
        # on the BeautifulSoup path).
        structure = self.parse_document_structure(text)
        
        if BEAUTIFULSOUP_AVAILABLE:
            return self._chunk_with_beautifulsoup(text, structure)
        else:
            return self._chunk_with_regex(text, structure)
    
    def _chunk_with_beautifulsoup(self, text: str, structure: Dict[str, Any]) -> List[Chunk]:
        """Chunk by configured tag names using BeautifulSoup.

        Nested tags (e.g. a <p> inside a chunked <div>) produce
        overlapping chunks, one per matching element. Elements whose
        text is 10 characters or shorter are skipped.
        """
        soup = BeautifulSoup(text, 'html.parser')
        chunks = []
        
        # One pass per configured tag name.
        for tag_name in self.chunk_by_tags:
            elements = soup.find_all(tag_name)
            for element in elements:
                element_text = element.get_text().strip()
                
                if element_text and len(element_text) > 10:
                    # Locate the element's serialized form in the original
                    # text; str(element) may not match the source verbatim
                    # (BeautifulSoup can normalize markup), in which case
                    # the element is silently skipped.
                    element_str = str(element)
                    start_pos = text.find(element_str)
                    
                    if start_pos != -1:
                        chunk = Chunk(
                            text=element_text,
                            start_index=start_pos,
                            end_index=start_pos + len(element_str),
                            chunk_type='html_element',
                            metadata={
                                'tag': tag_name,
                                'attributes': dict(element.attrs),
                                'text_length': len(element_text)
                            }
                        )
                        chunks.append(chunk)
        
        self.chunks = chunks
        return chunks
    
    def _chunk_with_regex(self, text: str, structure: Dict[str, Any]) -> List[Chunk]:
        """Chunk from the regex-parsed structure (paragraphs + headings).

        Offsets come from text.find() on the extracted inner text, so a
        duplicate string earlier in the document can shift them; entries
        whose text cannot be found are skipped.
        """
        chunks = []
        
        # Paragraph chunks.
        for paragraph in structure['paragraphs']:
            if paragraph['text']:
                start_pos = text.find(paragraph['text'])
                if start_pos != -1:
                    chunk = Chunk(
                        text=paragraph['text'],
                        start_index=start_pos,
                        end_index=start_pos + len(paragraph['text']),
                        chunk_type='html_paragraph',
                        metadata={'tag': 'p'}
                    )
                    chunks.append(chunk)
        
        # Heading chunks.
        for heading in structure['headings']:
            if heading['text']:
                start_pos = text.find(heading['text'])
                if start_pos != -1:
                    chunk = Chunk(
                        text=heading['text'],
                        start_index=start_pos,
                        end_index=start_pos + len(heading['text']),
                        chunk_type='html_heading',
                        metadata={
                            'tag': f'h{heading["level"]}',
                            'level': heading['level']
                        }
                    )
                    chunks.append(chunk)
        
        self.chunks = chunks
        return chunks


class CodeChunker(DocumentStructureChunker):
    """Source-code chunker.

    Detects functions, classes, imports and comments with line-based
    heuristics (Python-style indentation plus JS/C-style signatures) and
    chunks by function/class, falling back to fixed-size line chunks.
    """

    def __init__(self, max_chunk_size: int = 1000,
                 chunk_by_functions: bool = True,
                 chunk_by_classes: bool = True,
                 preserve_comments: bool = True):
        """
        Args:
            max_chunk_size: maximum characters per chunk; larger
                functions/classes are split further.
            chunk_by_functions: emit one chunk per detected function.
            chunk_by_classes: emit one chunk per detected class.
            preserve_comments: stored but not read by this class —
                presumably reserved for future use; verify.
        """
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.chunk_by_functions = chunk_by_functions
        self.chunk_by_classes = chunk_by_classes
        self.preserve_comments = preserve_comments

    def parse_document_structure(self, text: str) -> Dict[str, Any]:
        """Scan the code line by line for functions, classes, imports and comments."""
        structure = {
            'functions': [],
            'classes': [],
            'imports': [],
            'comments': [],
            'blocks': []
        }

        lines = text.split('\n')

        for i, line in enumerate(lines):
            line_stripped = line.strip()

            # Function definition: Python `def`, JS `function`, or a
            # C/JS-style `name(args) {` signature.
            func_match = re.match(r'^\s*(def\s+\w+|function\s+\w+|\w+\s*\([^)]*\)\s*{)', line)
            if func_match:
                structure['functions'].append({
                    'line_index': i,
                    'signature': line_stripped,
                    # First identifier after an optional def/function keyword.
                    'name': re.search(r'(def\s+|function\s+)?(\w+)', line_stripped).group(2)
                })

            # Class definition.
            class_match = re.match(r'^\s*(class\s+\w+)', line)
            if class_match:
                structure['classes'].append({
                    'line_index': i,
                    'signature': line_stripped,
                    'name': re.search(r'class\s+(\w+)', line_stripped).group(1)
                })

            # Import statement.
            import_match = re.match(r'^\s*(import\s+\w+|from\s+\w+\s+import)', line)
            if import_match:
                structure['imports'].append({
                    'line_index': i,
                    'statement': line_stripped
                })

            # Comment line (#, //, /* or a continuation *).
            comment_match = re.match(r'^\s*(#|//|/\*|\*)', line)
            if comment_match:
                structure['comments'].append({
                    'line_index': i,
                    'content': line_stripped
                })

        self.document_structure = structure
        return structure

    def chunk(self, text: str) -> List[Chunk]:
        """Split source code into function/class chunks.

        Fix over the previous version: the extractors now always return a
        list of chunks, so oversized definitions no longer get *appended*
        as a nested list inside self.chunks.
        """
        self.clear()

        if not text:
            return self.chunks

        structure = self.parse_document_structure(text)

        chunks = []

        if self.chunk_by_functions and structure['functions']:
            for func in structure['functions']:
                chunks.extend(self._extract_function_chunk(text, func))

        if self.chunk_by_classes and structure['classes']:
            for cls in structure['classes']:
                chunks.extend(self._extract_class_chunk(text, cls))

        # No functions or classes found: fall back to line-based chunks.
        if not chunks:
            chunks = self._chunk_by_lines(text)

        self.chunks = chunks
        return chunks

    def _body_lines(self, lines: List[str], start_line: int) -> List[str]:
        """Collect a definition's lines using simple indent detection.

        The body ends at the first non-blank, non-comment line whose
        indentation is at or below the definition line's indentation.
        """
        base_indent = len(lines[start_line]) - len(lines[start_line].lstrip())
        collected = [lines[start_line]]
        for i in range(start_line + 1, len(lines)):
            line = lines[i]
            current_indent = len(line) - len(line.lstrip())
            if line.strip() and current_indent <= base_indent and not line.strip().startswith('#'):
                break
            collected.append(line)
        return collected

    def _extract_function_chunk(self, text: str, func_info: Dict[str, Any]) -> List[Chunk]:
        """Extract the chunk(s) covering one detected function."""
        lines = text.split('\n')
        func_lines = self._body_lines(lines, func_info['line_index'])
        func_text = '\n'.join(func_lines)

        if len(func_text) > self.max_chunk_size:
            # Function too large: split into line-based sub-chunks.
            return self._split_large_function(text, func_text, func_info)

        start_index = text.find(func_text)
        return [Chunk(
            text=func_text,
            start_index=start_index,
            end_index=start_index + len(func_text),
            chunk_type='code_function',
            metadata={
                'function_name': func_info['name'],
                'line_count': len(func_lines),
                'signature': func_info['signature']
            }
        )]

    def _extract_class_chunk(self, text: str, class_info: Dict[str, Any]) -> List[Chunk]:
        """Extract the chunk(s) covering one detected class."""
        lines = text.split('\n')
        class_lines = self._body_lines(lines, class_info['line_index'])
        class_text = '\n'.join(class_lines)

        if len(class_text) > self.max_chunk_size:
            # Class too large: split per method.
            return self._split_large_class(text, class_text, class_info)

        start_index = text.find(class_text)
        return [Chunk(
            text=class_text,
            start_index=start_index,
            end_index=start_index + len(class_text),
            chunk_type='code_class',
            metadata={
                'class_name': class_info['name'],
                'line_count': len(class_lines),
                'signature': class_info['signature']
            }
        )]

    def _split_large_function(self, text: str, func_text: str,
                              func_info: Dict[str, Any]) -> List[Chunk]:
        """Split an oversized function into max_chunk_size line groups.

        Fix over the previous version: the original referenced an
        undefined name `text`, raising NameError whenever a function
        exceeded max_chunk_size; the full text is now passed in so chunk
        offsets can be located.
        """
        chunks = []
        current_lines = []

        for line in func_text.split('\n'):
            current_lines.append(line)
            if len('\n'.join(current_lines)) > self.max_chunk_size:
                chunk_text = '\n'.join(current_lines)
                start_index = text.find(chunk_text)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=start_index,
                    end_index=start_index + len(chunk_text),
                    chunk_type='code_function',
                    metadata={
                        'function_name': func_info['name'],
                        'is_sub_chunk': True,
                        'line_count': len(current_lines)
                    }
                ))
                current_lines = []

        if current_lines:
            chunk_text = '\n'.join(current_lines)
            start_index = text.find(chunk_text)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=start_index,
                end_index=start_index + len(chunk_text),
                chunk_type='code_function',
                metadata={
                    'function_name': func_info['name'],
                    'is_sub_chunk': True,
                    'line_count': len(current_lines)
                }
            ))

        return chunks

    def _split_large_class(self, text: str, class_text: str,
                           class_info: Dict[str, Any]) -> List[Chunk]:
        """Split an oversized class into one chunk per `def` method.

        Fix over the previous version: the original referenced an
        undefined name `text` (NameError); the full text is now passed in.
        """
        chunks = []
        lines = class_text.split('\n')

        # Line indices (within class_text) where methods start.
        method_starts = [i for i, line in enumerate(lines)
                         if re.match(r'\s*def\s+', line)]

        for idx, method_start in enumerate(method_starts):
            method_end = method_starts[idx + 1] if idx + 1 < len(method_starts) else len(lines)
            method_lines = lines[method_start:method_end]
            method_text = '\n'.join(method_lines)
            start_index = text.find(method_text)

            chunks.append(Chunk(
                text=method_text,
                start_index=start_index,
                end_index=start_index + len(method_text),
                chunk_type='code_class_method',
                metadata={
                    'class_name': class_info['name'],
                    'is_sub_chunk': True,
                    'line_count': len(method_lines)
                }
            ))

        return chunks

    def _chunk_by_lines(self, text: str) -> List[Chunk]:
        """Fallback: greedy line accumulation up to max_chunk_size."""
        lines = text.split('\n')
        chunks = []
        current_lines = []
        current_start = 0

        for line in lines:
            current_lines.append(line)
            if len('\n'.join(current_lines)) > self.max_chunk_size:
                chunk_text = '\n'.join(current_lines)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='code_lines',
                    metadata={'line_count': len(current_lines)}
                ))
                current_lines = []
                current_start += len(chunk_text) + 1  # +1 for the consumed newline

        if current_lines:
            chunk_text = '\n'.join(current_lines)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='code_lines',
                metadata={'line_count': len(current_lines)}
            ))

        return chunks


class StructuredTextChunker(DocumentStructureChunker):
    """Chunker for structured plain text (numbered chapters/sections)."""

    def __init__(self, max_chunk_size: int = 2000,
                 section_patterns: List[str] = None):
        """
        Args:
            max_chunk_size: maximum characters per chunk; oversized
                sections are split on blank-line paragraph boundaries.
            section_patterns: regexes (matched against stripped lines)
                that mark a section heading. Defaults cover Chinese
                chapter markers, numbered items, dotted numbers and
                Roman numerals.
        """
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.section_patterns = section_patterns or [
            r'^第[一二三四五六七八九十\d]+[章节]',
            r'^[一二三四五六七八九十\d]+[、\.]',
            r'^[A-Z][a-z]+[一二三四五六七八九十\d]+[章节]',
            r'^\d+\.\d+',
            r'^[IVX]+\.'
        ]

    def parse_document_structure(self, text: str) -> Dict[str, Any]:
        """Scan the text for section headings and group lines into sections."""
        structure = {
            'sections': [],
            'headings': [],
            'paragraphs': []
        }

        lines = text.split('\n')
        current_section = None

        for i, line in enumerate(lines):
            line_stripped = line.strip()

            # A line matching any section pattern starts a new section.
            for pattern in self.section_patterns:
                if re.match(pattern, line_stripped):
                    if current_section:
                        current_section['end_line'] = i - 1
                        structure['sections'].append(current_section)

                    current_section = {
                        'title': line_stripped,
                        'start_line': i,
                        'level': self._get_section_level(line_stripped),
                        'content': []
                    }
                    break

            # Secondary-heading detection. NOTE(review): the inner
            # pattern is also one of the section patterns, so this
            # branch is effectively unreachable with the defaults;
            # preserved for custom section_patterns.
            if not re.match('|'.join(self.section_patterns), line_stripped):
                if re.match(r'^[一二三四五六七八九十\d]+[、\.]', line_stripped):
                    structure['headings'].append({
                        'text': line_stripped,
                        'line_index': i,
                        'level': 2
                    })

            # Every line (including the title line) joins the running section.
            if current_section:
                current_section['content'].append(line)

        # Close the final section, if any.
        if current_section:
            current_section['end_line'] = len(lines) - 1
            structure['sections'].append(current_section)

        self.document_structure = structure
        return structure

    def _get_section_level(self, title: str) -> int:
        """Map a heading's shape to a nesting level (1 = chapter ... 4 = other)."""
        if re.match(r'^第[一二三四五六七八九十\d]+[章节]', title):
            return 1
        elif re.match(r'^[一二三四五六七八九十\d]+[、\.]', title):
            return 2
        elif re.match(r'^\d+\.\d+', title):
            return 3
        else:
            return 4

    def chunk(self, text: str) -> List[Chunk]:
        """Split structured text into one chunk per detected section."""
        self.clear()

        if not text:
            return self.chunks

        structure = self.parse_document_structure(text)

        chunks = []

        for section in structure['sections']:
            section_text = '\n'.join(section['content'])

            if len(section_text) > self.max_chunk_size:
                # Oversized section: split on paragraph boundaries.
                chunks.extend(self._split_large_section(text, section_text, section))
            else:
                start = text.find(section_text)
                chunks.append(Chunk(
                    text=section_text,
                    start_index=start,
                    end_index=start + len(section_text),
                    chunk_type='structured_section',
                    metadata={
                        'section_title': section['title'],
                        'section_level': section['level'],
                        'line_count': section['end_line'] - section['start_line'] + 1
                    }
                ))

        self.chunks = chunks
        return chunks

    def _split_large_section(self, text: str, section_text: str,
                             section: Dict[str, Any]) -> List[Chunk]:
        """Split an oversized section into paragraph-aligned sub-chunks.

        Fix over the previous version: the original referenced an
        undefined name `text` to seed current_start, raising NameError
        whenever a section exceeded max_chunk_size; the full text is now
        passed in explicitly.
        """
        paragraphs = section_text.split('\n\n')
        chunks = []
        current_chunk = ""
        current_start = text.find(section_text)

        for paragraph in paragraphs:
            if len(current_chunk + paragraph) > self.max_chunk_size:
                if current_chunk:
                    chunks.append(Chunk(
                        text=current_chunk,
                        start_index=current_start,
                        end_index=current_start + len(current_chunk),
                        chunk_type='structured_section',
                        metadata={
                            'section_title': section['title'],
                            'section_level': section['level'],
                            'is_sub_chunk': True
                        }
                    ))
                    current_start += len(current_chunk) + 2  # +2 for the \n\n separator
                    current_chunk = ""

            current_chunk += paragraph + "\n\n"

        if current_chunk:
            chunks.append(Chunk(
                text=current_chunk.strip(),
                start_index=current_start,
                end_index=current_start + len(current_chunk.strip()),
                chunk_type='structured_section',
                metadata={
                    'section_title': section['title'],
                    'section_level': section['level'],
                    'is_sub_chunk': True
                }
            ))

        return chunks


def _print_section(title: str) -> None:
    """Print a 40-wide banner announcing one demo section."""
    print("\n" + "=" * 40)
    print(title)
    print("=" * 40)


def _demo_markdown():
    """Run the Markdown chunking demo; return the chunker so results can be saved."""
    _print_section("1. Markdown文档分块")

    markdown_text = """
# 人工智能技术概述

## 1. 机器学习基础

机器学习是人工智能的核心技术之一。它使计算机能够从数据中学习而无需明确编程。

### 1.1 监督学习

监督学习使用标记数据进行训练。常见的算法包括：
- 线性回归
- 决策树
- 支持向量机

### 1.2 无监督学习

无监督学习处理未标记的数据，主要用于：
- 聚类分析
- 降维
- 异常检测

## 2. 深度学习

深度学习使用神经网络来模拟人脑的工作方式。

```python
# 简单的神经网络示例
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
```

### 2.1 卷积神经网络

CNN主要用于图像处理任务。

### 2.2 循环神经网络

RNN适用于序列数据处理。

## 3. 应用领域

AI在以下领域有广泛应用：

| 领域 | 应用 | 技术栈 |
|------|------|--------|
| 医疗 | 疾病诊断 | CNN, RNN |
| 金融 | 风险评估 | 随机森林, XGBoost |
| 教育 | 个性化学习 | 推荐系统 |

1. **医疗保健**：AI可以帮助医生诊断疾病
2. **金融科技**：用于风险评估和欺诈检测
3. **教育培训**：提供个性化学习体验
    """

    md_chunker = MarkdownChunker(max_chunk_size=500, chunk_by_sections=True)
    md_chunker.chunk(markdown_text)
    md_chunker.print_chunks(max_chunks=3)
    return md_chunker


def _demo_html():
    """Run the HTML chunking demo; return the chunker so results can be saved."""
    _print_section("2. HTML文档分块")

    html_text = """
<!DOCTYPE html>
<html>
<head>
    <title>AI技术文档</title>
</head>
<body>
    <h1>人工智能技术概述</h1>
    
    <section>
        <h2>机器学习基础</h2>
        <p>机器学习是人工智能的核心技术之一。它使计算机能够从数据中学习而无需明确编程。</p>
        
        <h3>监督学习</h3>
        <p>监督学习使用标记数据进行训练。常见的算法包括线性回归、决策树、支持向量机等。</p>
        
        <h3>无监督学习</h3>
        <p>无监督学习处理未标记的数据，主要用于聚类分析、降维、异常检测等。</p>
    </section>
    
    <section>
        <h2>深度学习</h2>
        <p>深度学习使用神经网络来模拟人脑的工作方式。</p>
        
        <pre><code>
# 简单的神经网络示例
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
        </code></pre>
    </section>
    
    <section>
        <h2>应用领域</h2>
        <ul>
            <li><strong>医疗保健</strong>：AI可以帮助医生诊断疾病</li>
            <li><strong>金融科技</strong>：用于风险评估和欺诈检测</li>
            <li><strong>教育培训</strong>：提供个性化学习体验</li>
        </ul>
    </section>
</body>
</html>
    """

    html_chunker = HTMLChunker(max_chunk_size=500)
    html_chunker.chunk(html_text)
    html_chunker.print_chunks(max_chunks=3)
    return html_chunker


def _demo_code():
    """Run the source-code chunking demo; return the chunker so results can be saved."""
    _print_section("3. 代码文档分块")

    code_text = '''
# 机器学习工具类
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

class MachineLearningUtils:
    """机器学习工具类"""
    
    def __init__(self, random_state=42):
        self.random_state = random_state
        np.random.seed(random_state)
    
    def prepare_data(self, X, y, test_size=0.2):
        """
        准备训练和测试数据
        
        Args:
            X: 特征数据
            y: 标签数据
            test_size: 测试集比例
            
        Returns:
            X_train, X_test, y_train, y_test
        """
        return train_test_split(X, y, test_size=test_size, 
                               random_state=self.random_state)
    
    def evaluate_model(self, model, X_test, y_test):
        """
        评估模型性能
        
        Args:
            model: 训练好的模型
            X_test: 测试特征
            y_test: 测试标签
            
        Returns:
            准确率
        """
        y_pred = model.predict(X_test)
        return accuracy_score(y_test, y_pred)
    
    def feature_importance(self, model, feature_names):
        """
        获取特征重要性
        
        Args:
            model: 训练好的模型
            feature_names: 特征名称列表
            
        Returns:
            特征重要性字典
        """
        if hasattr(model, 'feature_importances_'):
            importance_dict = dict(zip(feature_names, model.feature_importances_))
            return dict(sorted(importance_dict.items(), 
                             key=lambda x: x[1], reverse=True))
        else:
            return {}

class DataProcessor:
    """数据处理器"""
    
    def __init__(self):
        self.scaler = None
        self.encoder = None
    
    def normalize_features(self, X):
        """
        标准化特征
        
        Args:
            X: 特征数据
            
        Returns:
            标准化后的特征
        """
        from sklearn.preprocessing import StandardScaler
        self.scaler = StandardScaler()
        return self.scaler.fit_transform(X)
    
    def encode_labels(self, y):
        """
        编码标签
        
        Args:
            y: 标签数据
            
        Returns:
            编码后的标签
        """
        from sklearn.preprocessing import LabelEncoder
        self.encoder = LabelEncoder()
        return self.encoder.fit_transform(y)
    '''

    code_chunker = CodeChunker(max_chunk_size=300, chunk_by_functions=True, chunk_by_classes=True)
    code_chunker.chunk(code_text)
    code_chunker.print_chunks(max_chunks=3)
    return code_chunker


def _demo_structured_text():
    """Run the structured-text chunking demo; return the chunker so results can be saved."""
    _print_section("4. 结构化文本分块")

    structured_text = """
第一章 人工智能概述

人工智能（AI）是计算机科学的一个分支，致力于创建能够执行通常需要人类智能的任务的系统。

第一节 基本概念

人工智能的基本概念包括：
1. 智能体（Agent）
2. 环境（Environment）
3. 感知（Perception）
4. 行动（Action）

第二节 发展历史

人工智能的发展可以分为几个阶段：
- 1950年代：诞生期
- 1960-1970年代：第一次AI热潮
- 1980-1990年代：专家系统时代
- 2000年代至今：深度学习时代

第二章 机器学习

机器学习是AI的核心技术之一，它使计算机能够从数据中学习而无需明确编程。

第一节 监督学习

监督学习使用标记数据进行训练。常见的算法包括线性回归、决策树、支持向量机等。

第二节 无监督学习

无监督学习处理未标记的数据，主要用于聚类分析、降维、异常检测等。

第三章 深度学习

深度学习使用神经网络来模拟人脑的工作方式。

第一节 神经网络基础

神经网络的基本组成单元是神经元，多个神经元组成神经网络层。

第二节 卷积神经网络

CNN主要用于图像处理任务，通过卷积层提取图像特征。

第三节 循环神经网络

RNN适用于序列数据处理，如自然语言处理和时间序列分析。
    """

    structured_chunker = StructuredTextChunker(max_chunk_size=400)
    structured_chunker.chunk(structured_text)
    structured_chunker.print_chunks(max_chunks=3)
    return structured_chunker


def main():
    """Run all document-structure chunking demos and save their results.

    Demonstrates four chunkers (Markdown, HTML, source code, structured
    text), prints a few sample chunks from each, then persists every
    chunker's output to a JSON file in the working directory.
    """
    print("=" * 60)
    print("文档结构分块方法演示")
    print("=" * 60)

    # Each helper prints its own section banner and sample chunks, and
    # returns the chunker so its results can be saved below.
    md_chunker = _demo_markdown()
    html_chunker = _demo_html()
    code_chunker = _demo_code()
    structured_chunker = _demo_structured_text()

    _print_section("保存分块结果")
    md_chunker.save_chunks("markdown_chunks.json")
    html_chunker.save_chunks("html_chunks.json")
    code_chunker.save_chunks("code_chunks.json")
    structured_chunker.save_chunks("structured_chunks.json")

    print("\n" + "=" * 60)
    print("文档结构分块演示完成！")
    print("=" * 60)

# Entry point: run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()