"""
特殊格式分块方法演示
演示针对特殊格式的文本分块方法，包括表格、JSON、XML、日志等
"""
import csv
import io
import json
import re
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
from xml.sax.saxutils import escape, quoteattr

try:
    from basic_chunking import BasicChunker, Chunk
except (ImportError, SyntaxError):
    from import_utils import safe_import
    BasicChunker, Chunk = safe_import(".01_basic_chunking", package="chunk_demo", from_list=["BasicChunker", "Chunk"])


class TableChunker(BasicChunker):
    """Chunker for tabular data (CSV and Markdown tables).

    Splits a table either row-wise (groups of at most ``max_rows_per_chunk``
    rows, optionally repeating the header in every chunk) or column-wise
    (one chunk per column).
    """

    def __init__(self, max_rows_per_chunk: int = 50,
                 chunk_by_columns: bool = False,
                 preserve_header: bool = True):
        super().__init__()
        self.max_rows_per_chunk = max_rows_per_chunk
        self.chunk_by_columns = chunk_by_columns
        self.preserve_header = preserve_header

    def parse_csv_table(self, csv_text: str) -> List[Dict[str, str]]:
        """Parse CSV text into a list of row dicts keyed by the header row."""
        reader = csv.DictReader(io.StringIO(csv_text))
        return list(reader)

    def parse_markdown_table(self, markdown_text: str) -> Tuple[List[str], List[List[str]]]:
        """Parse a Markdown table into ``(headers, rows)``."""
        lines = markdown_text.strip().split('\n')

        # FIX: the old check only recognized separator rows that literally
        # start with '|---'; accept any alignment row such as '| --- |'
        # or '|:---:|'.
        has_separator = (
            len(lines) >= 2
            and '-' in lines[1]
            and re.match(r'^\s*\|?[\s:|-]+\|?\s*$', lines[1]) is not None
        )
        if has_separator:
            header_line = lines[0]
            data_lines = lines[2:]
        else:
            header_line = lines[0] if lines else ""
            data_lines = lines[1:]

        # Cells live between the leading and trailing '|'.
        headers = [col.strip() for col in header_line.split('|')[1:-1]]

        data = []
        for line in data_lines:
            if line.startswith('|'):
                data.append([col.strip() for col in line.split('|')[1:-1]])

        return headers, data

    def chunk_csv(self, csv_text: str) -> List[Chunk]:
        """Chunk a CSV table; returns [] on parse failure or empty input."""
        try:
            data = self.parse_csv_table(csv_text)
            if not data:
                return []

            headers = list(data[0].keys())
            chunks = []

            if self.chunk_by_columns:
                # One chunk per column.
                for column in headers:
                    # FIX: use .get() with a fallback so ragged rows (missing
                    # cells come back as None from DictReader) cannot crash
                    # the join below.
                    column_data = [row.get(column) or "" for row in data]
                    chunk_text = f"{column}\n" + "\n".join(column_data)

                    # NOTE(review): .find() gives only an approximate source
                    # position (first occurrence of the column name).
                    col_pos = csv_text.find(column)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=col_pos,
                        end_index=col_pos + len(chunk_text),
                        chunk_type='table_column',
                        metadata={
                            'column_name': column,
                            'row_count': len(column_data),
                            'data_type': 'csv'
                        }
                    ))
            else:
                # Groups of at most max_rows_per_chunk rows.
                for i in range(0, len(data), self.max_rows_per_chunk):
                    chunk_data = data[i:i + self.max_rows_per_chunk]

                    # Serialize the rows once; the old code duplicated this
                    # loop in both branches of preserve_header.
                    chunk_text = "".join(
                        ",".join([row.get(col) or "" for col in headers]) + "\n"
                        for row in chunk_data
                    )
                    if self.preserve_header:
                        chunk_text = ",".join(headers) + "\n" + chunk_text

                    # chunk_data is never empty here (range steps by the
                    # chunk size), so the old conditionals were dead code.
                    first_cell = chunk_data[0].get(headers[0]) or ""
                    last_cell = chunk_data[-1].get(headers[-1]) or ""
                    chunks.append(Chunk(
                        text=chunk_text.strip(),
                        start_index=csv_text.find(first_cell),
                        end_index=csv_text.find(last_cell) + len(chunk_text),
                        chunk_type='table_rows',
                        metadata={
                            'start_row': i,
                            'end_row': i + len(chunk_data) - 1,
                            'row_count': len(chunk_data),
                            'data_type': 'csv'
                        }
                    ))

            return chunks

        except Exception as e:
            print(f"CSV解析错误: {e}")
            return []

    def chunk_markdown_table(self, markdown_text: str) -> List[Chunk]:
        """Chunk a Markdown table; returns [] on parse failure or empty input."""
        try:
            headers, data = self.parse_markdown_table(markdown_text)
            if not headers or not data:
                return []

            chunks = []

            if self.chunk_by_columns:
                for i, header in enumerate(headers):
                    # Pad ragged rows with "" so every column has a cell.
                    column_data = [row[i] if i < len(row) else "" for row in data]
                    chunk_text = f"{header}\n" + "\n".join(column_data)

                    header_pos = markdown_text.find(header)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=header_pos,
                        end_index=header_pos + len(chunk_text),
                        chunk_type='table_column',
                        metadata={
                            'column_name': header,
                            'row_count': len(column_data),
                            'data_type': 'markdown'
                        }
                    ))
            else:
                for i in range(0, len(data), self.max_rows_per_chunk):
                    chunk_data = data[i:i + self.max_rows_per_chunk]

                    # Serialize the rows once; prepend header + alignment row
                    # when preserve_header is set (old code duplicated the loop).
                    chunk_text = "".join(
                        "| " + " | ".join(row) + " |\n" for row in chunk_data
                    )
                    if self.preserve_header:
                        header_block = (
                            "| " + " | ".join(headers) + " |\n"
                            + "| " + " | ".join(["---"] * len(headers)) + " |\n"
                        )
                        chunk_text = header_block + chunk_text

                    chunks.append(Chunk(
                        text=chunk_text.strip(),
                        start_index=markdown_text.find(chunk_data[0][0]),
                        end_index=markdown_text.find(chunk_data[-1][-1]) + len(chunk_text),
                        chunk_type='table_rows',
                        metadata={
                            'start_row': i,
                            'end_row': i + len(chunk_data) - 1,
                            'row_count': len(chunk_data),
                            'data_type': 'markdown'
                        }
                    ))

            return chunks

        except Exception as e:
            print(f"Markdown表格解析错误: {e}")
            return []

    def chunk(self, text: str) -> List[Chunk]:
        """Dispatch to CSV or Markdown chunking based on a cheap format sniff."""
        self.clear()

        if not text:
            return self.chunks

        first_line = text.split('\n')[0]
        if text.strip().startswith(',') or ',' in first_line:
            # Comma in the first line: treat as CSV.
            chunks = self.chunk_csv(text)
        elif '|' in text and text.strip().startswith('|'):
            # Pipe-delimited: treat as a Markdown table.
            chunks = self.chunk_markdown_table(text)
        else:
            # Unknown shape: try CSV first, then fall back to Markdown.
            try:
                chunks = self.chunk_csv(text)
                if not chunks:
                    chunks = self.chunk_markdown_table(text)
            except Exception:  # FIX: was a bare except
                chunks = []

        self.chunks = chunks
        return chunks


class JSONChunker(BasicChunker):
    """Chunker for JSON documents.

    Either walks the parsed structure and emits one chunk per leaf value
    (``chunk_by_structure=True``), or slices the pretty-printed JSON text
    into pieces of at most ``max_chunk_size`` characters, preferring to cut
    right after a balanced object/array.
    """

    def __init__(self, max_chunk_size: int = 2000,
                 chunk_by_structure: bool = True,
                 preserve_keys: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.chunk_by_structure = chunk_by_structure
        self.preserve_keys = preserve_keys

    def parse_json(self, json_text: str) -> Any:
        """Parse JSON text, returning None (and printing) on failure."""
        try:
            return json.loads(json_text)
        except json.JSONDecodeError as e:
            print(f"JSON解析错误: {e}")
            return None

    def chunk_json_object(self, obj: Any, path: str = "") -> List[Chunk]:
        """Recursively emit one chunk per JSON leaf value.

        ``path`` is a dotted/indexed JSON path such as ``users[0].name``.
        Offsets are relative to the re-serialized value, not the original
        text (the original offsets are unknown after parsing).
        """
        chunks = []

        if isinstance(obj, dict):
            for key, value in obj.items():
                current_path = f"{path}.{key}" if path else key

                if isinstance(value, (dict, list)):
                    # Containers: recurse; only leaves become chunks.
                    chunks.extend(self.chunk_json_object(value, current_path))
                else:
                    chunk_text = json.dumps({key: value}, ensure_ascii=False, indent=2)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=0,  # re-serialized; original offset unknown
                        end_index=len(chunk_text),
                        chunk_type='json_value',
                        metadata={
                            'path': current_path,
                            'value_type': type(value).__name__,
                            'key': key
                        }
                    ))

        elif isinstance(obj, list):
            for i, item in enumerate(obj):
                current_path = f"{path}[{i}]"

                if isinstance(item, (dict, list)):
                    chunks.extend(self.chunk_json_object(item, current_path))
                else:
                    chunk_text = json.dumps(item, ensure_ascii=False, indent=2)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=0,
                        end_index=len(chunk_text),
                        chunk_type='json_value',
                        metadata={
                            'path': current_path,
                            'value_type': type(item).__name__,
                            'index': i
                        }
                    ))

        else:
            # Scalar at the top level (or passed in directly).
            chunk_text = json.dumps(obj, ensure_ascii=False, indent=2)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=0,
                end_index=len(chunk_text),
                chunk_type='json_value',
                metadata={
                    'path': path,
                    'value_type': type(obj).__name__
                }
            ))

        return chunks

    def chunk_json_by_size(self, json_text: str) -> List[Chunk]:
        """Slice the re-serialized JSON into size-bounded chunks."""
        try:
            obj = self.parse_json(json_text)
            if obj is None:
                return []

            # Normalize formatting so offsets are over a known serialization.
            json_str = json.dumps(obj, ensure_ascii=False, indent=2)

            chunks = []
            start = 0
            while start < len(json_str):
                end = min(start + self.max_chunk_size, len(json_str))
                chunk_text = json_str[start:end]

                # Prefer to cut right after a balanced object/array.
                if end < len(json_str):
                    last_complete = self._find_last_complete_json(chunk_text)
                    if last_complete > 0:
                        chunk_text = chunk_text[:last_complete]
                        end = start + last_complete

                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=start,
                    end_index=start + len(chunk_text),
                    chunk_type='json_size',
                    metadata={
                        'chunk_size': len(chunk_text),
                        'is_complete': self._is_complete_json(chunk_text)
                    }
                ))
                start = end

            return chunks

        except Exception as e:
            print(f"JSON分块错误: {e}")
            return []

    def _find_last_complete_json(self, text: str) -> int:
        """Return the index just past the last balanced ``{...}`` / ``[...]``.

        Returns 0 when no top-level object/array closes within ``text``.
        """
        brace_count = 0
        bracket_count = 0
        last_complete = 0
        in_string = False
        escaped = False

        for i, char in enumerate(text):
            # FIX: skip brackets inside JSON string literals; the old counter
            # was thrown off by values such as "a}b".
            if in_string:
                if escaped:
                    escaped = False
                elif char == '\\':
                    escaped = True
                elif char == '"':
                    in_string = False
                continue

            if char == '"':
                in_string = True
            elif char == '{':
                brace_count += 1
            elif char == '}':
                brace_count -= 1
                if brace_count == 0:
                    last_complete = i + 1
            elif char == '[':
                bracket_count += 1
            elif char == ']':
                bracket_count -= 1
                if bracket_count == 0:
                    last_complete = i + 1

        return last_complete

    def _is_complete_json(self, text: str) -> bool:
        """Return True when ``text`` parses as standalone JSON."""
        try:
            json.loads(text)
            return True
        # FIX: was a bare except; JSONDecodeError is a ValueError subclass.
        except ValueError:
            return False

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk JSON text by structure or by size, per configuration."""
        self.clear()

        if not text:
            return self.chunks

        if self.chunk_by_structure:
            # parse_json already absorbs decode errors and returns None.
            obj = self.parse_json(text)
            chunks = self.chunk_json_object(obj) if obj is not None else []
        else:
            chunks = self.chunk_json_by_size(text)

        self.chunks = chunks
        return chunks


class XMLChunker(BasicChunker):
    """Chunker for XML documents.

    Either walks the element tree and emits one chunk per element
    (``chunk_by_elements=True``), or slices the raw text into pieces of at
    most ``max_chunk_size`` characters, preferring to cut right after a
    balanced element.
    """

    def __init__(self, max_chunk_size: int = 2000,
                 chunk_by_elements: bool = True,
                 preserve_attributes: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.chunk_by_elements = chunk_by_elements
        self.preserve_attributes = preserve_attributes

    def parse_xml(self, xml_text: str) -> Optional[ET.Element]:
        """Parse XML text, returning None (and printing) on failure."""
        try:
            return ET.fromstring(xml_text)
        except ET.ParseError as e:
            print(f"XML解析错误: {e}")
            return None

    def element_to_string(self, element: ET.Element) -> str:
        """Render one element: tag, optional attributes, and its own text.

        Child elements are intentionally omitted — they get their own
        chunks in chunk_xml_elements().
        """
        attr_str = ""
        if self.preserve_attributes and element.attrib:
            # FIX: quoteattr/escape keep the rendered fragment well-formed
            # when attribute values or text contain <, >, & or quotes; the
            # old f-string interpolation emitted them raw.
            attr_str = " " + " ".join(
                f"{k}={quoteattr(v)}" for k, v in element.attrib.items()
            )

        body = escape(element.text) if element.text and element.text.strip() else ""
        return f"<{element.tag}{attr_str}>{body}</{element.tag}>"

    def chunk_xml_elements(self, root: ET.Element, parent_path: str = "") -> List[Chunk]:
        """Recursively emit one chunk per element, depth-first from ``root``."""
        chunks = []

        current_path = f"{parent_path}/{root.tag}" if parent_path else root.tag

        element_text = self.element_to_string(root)

        chunks.append(Chunk(
            text=element_text,
            start_index=0,  # re-rendered; original offset unknown
            end_index=len(element_text),
            chunk_type='xml_element',
            metadata={
                'tag': root.tag,
                'path': current_path,
                'has_children': len(root) > 0,
                'has_attributes': len(root.attrib) > 0
            }
        ))

        for child in root:
            chunks.extend(self.chunk_xml_elements(child, current_path))

        return chunks

    def chunk_xml_by_size(self, xml_text: str) -> List[Chunk]:
        """Slice raw XML text into size-bounded chunks."""
        chunks = []
        start = 0

        while start < len(xml_text):
            end = min(start + self.max_chunk_size, len(xml_text))
            chunk_text = xml_text[start:end]

            # Prefer to cut right after a balanced element.
            if end < len(xml_text):
                last_complete = self._find_last_complete_xml(chunk_text)
                if last_complete > 0:
                    chunk_text = chunk_text[:last_complete]
                    end = start + last_complete

            chunks.append(Chunk(
                text=chunk_text,
                start_index=start,
                end_index=start + len(chunk_text),
                chunk_type='xml_size',
                metadata={
                    'chunk_size': len(chunk_text),
                    'is_complete': self._is_complete_xml(chunk_text)
                }
            ))
            start = end

        return chunks

    def _find_last_complete_xml(self, text: str) -> int:
        """Return the index just past the last top-level balanced element.

        Tag matching is regex-based and approximate; CDATA sections are not
        handled.
        """
        tag_stack = []
        last_complete = 0

        for match in re.finditer(r'<([^>]+)>', text):
            tag_content = match.group(1).strip()

            # FIX: skip declarations, processing instructions and comments
            # (<?xml ...?>, <!DOCTYPE ...>, <!-- ... -->); the old code
            # pushed them onto the stack as if they were opening tags.
            if tag_content.startswith(('!', '?')):
                continue

            if tag_content.endswith('/'):
                # FIX: self-closing tag is complete by itself; the old code
                # pushed it and never popped it, breaking the balance.
                if not tag_stack:
                    last_complete = match.end()
                continue

            if not tag_content.startswith('/'):
                # Opening tag; first token strips any attributes.
                tag_stack.append(tag_content.split()[0])
            else:
                end_tag_name = tag_content[1:].strip()
                if tag_stack and tag_stack[-1] == end_tag_name:
                    tag_stack.pop()
                    if not tag_stack:
                        last_complete = match.end()

        return last_complete

    def _is_complete_xml(self, text: str) -> bool:
        """Return True when ``text`` parses as a standalone XML document."""
        try:
            ET.fromstring(text)
            return True
        except ET.ParseError:  # FIX: was a bare except
            return False

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk XML text by elements or by size, per configuration."""
        self.clear()

        if not text:
            return self.chunks

        if self.chunk_by_elements:
            # parse_xml already absorbs ParseError and returns None.
            root = self.parse_xml(text)
            chunks = self.chunk_xml_elements(root) if root is not None else []
        else:
            chunks = self.chunk_xml_by_size(text)

        self.chunks = chunks
        return chunks


class LogChunker(BasicChunker):
    """Chunker for plain-text log files.

    Groups log lines either into fixed-size batches of at most
    ``max_entries_per_chunk`` entries, or into time windows of
    ``time_window_minutes`` (also capped by the entry limit).
    """

    def __init__(self, max_entries_per_chunk: int = 100,
                 chunk_by_time: bool = True,
                 time_window_minutes: int = 60):
        super().__init__()
        self.max_entries_per_chunk = max_entries_per_chunk
        self.chunk_by_time = chunk_by_time
        self.time_window_minutes = time_window_minutes

    def parse_log_entry(self, log_line: str) -> Optional[Dict[str, Any]]:
        """Parse one log line into {timestamp, level, message, raw_line}.

        Recognizes three layouts; anything else falls through to an
        UNKNOWN-level entry stamped with the current time. Never returns
        None in practice (the Optional hint is kept for caller compatibility).
        """
        line = log_line.strip()

        # Standard format: 2024-01-01 12:00:00 [INFO] Message
        m = re.match(r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\s+\[([^\]]+)\]\s+(.+)$', line)
        if m:
            # FIX: the old code tested "'[' in level", which is never true
            # for this pattern (the capture excludes the brackets), so every
            # entry was reported as INFO and real levels were discarded.
            return {
                'timestamp': m.group(1),
                'level': m.group(2),
                'message': m.group(3),
                'raw_line': line
            }

        # Apache access log: 127.0.0.1 - - [01/Jan/2024:12:00:00 +0000] "GET / HTTP/1.1" 200 1234
        m = re.match(r'^(\S+) \S+ \S+ \[([^\]]+)\] "([^"]+)" (\d+) (\d+)$', line)
        if m:
            # FIX: use the bracketed request time as the timestamp; the old
            # code stored the client IP (group 1) there.
            return {
                'timestamp': m.group(2),
                'level': 'INFO',
                'message': m.group(3),
                'raw_line': line
            }

        # Simple format: [INFO] Message
        m = re.match(r'^\[([^\]]+)\]\s+(.+)$', line)
        if m:
            return {
                'timestamp': datetime.now().isoformat(),
                'level': m.group(1),
                'message': m.group(2),
                'raw_line': line
            }

        # Unrecognized layout: keep the line, mark it UNKNOWN.
        return {
            'timestamp': datetime.now().isoformat(),
            'level': 'UNKNOWN',
            'message': line,
            'raw_line': line
        }

    def chunk_logs_by_count(self, log_text: str) -> List[Chunk]:
        """Group log lines into batches of max_entries_per_chunk entries."""
        lines = log_text.strip().split('\n')
        chunks = []

        for i in range(0, len(lines), self.max_entries_per_chunk):
            chunk_lines = lines[i:i + self.max_entries_per_chunk]
            chunk_text = '\n'.join(chunk_lines)

            entries = [self.parse_log_entry(line) for line in chunk_lines]
            valid_entries = [e for e in entries if e]

            # NOTE(review): .find() gives only approximate source offsets
            # (first occurrence of the boundary lines).
            chunks.append(Chunk(
                text=chunk_text,
                start_index=log_text.find(chunk_lines[0]) if chunk_lines else 0,
                end_index=log_text.find(chunk_lines[-1]) + len(chunk_lines[-1]) if chunk_lines else 0,
                chunk_type='log_entries',
                metadata={
                    'entry_count': len(chunk_lines),
                    'valid_entries': len(valid_entries),
                    'start_index': i,
                    'end_index': i + len(chunk_lines) - 1,
                    'log_levels': list(set(entry.get('level', 'UNKNOWN') for entry in valid_entries))
                }
            ))

        return chunks

    def chunk_logs_by_time(self, log_text: str) -> List[Chunk]:
        """Group log entries into windows of time_window_minutes minutes.

        A window also closes when it reaches max_entries_per_chunk entries.
        """
        lines = log_text.strip().split('\n')

        entries = []
        for line in lines:
            entry = self.parse_log_entry(line)
            if entry:
                try:
                    if 'timestamp' in entry:
                        entry['parsed_time'] = self._parse_timestamp(entry['timestamp'])
                    entries.append(entry)
                except Exception:
                    # Unparseable timestamp: keep the entry without one.
                    entries.append(entry)

        if not entries:
            return []

        # FIX: evaluate the fallback time once; the old code called
        # datetime.now() per entry, giving each a slightly different value.
        fallback_time = datetime.now()
        entries.sort(key=lambda x: x.get('parsed_time', fallback_time))

        chunks = []
        current_chunk = []
        current_time = None

        def _emit(batch):
            """Build a Chunk from the accumulated window of entries."""
            text = '\n'.join([e['raw_line'] for e in batch])
            return Chunk(
                text=text,
                start_index=log_text.find(batch[0]['raw_line']),
                end_index=log_text.find(batch[-1]['raw_line']) + len(batch[-1]['raw_line']),
                chunk_type='log_time_window',
                metadata={
                    'entry_count': len(batch),
                    'time_window_minutes': self.time_window_minutes,
                    'start_time': batch[0].get('timestamp', ''),
                    'end_time': batch[-1].get('timestamp', ''),
                    'log_levels': list(set(e.get('level', 'UNKNOWN') for e in batch))
                }
            )

        for entry in entries:
            entry_time = entry.get('parsed_time', fallback_time)

            if current_time is None:
                current_time = entry_time

            time_diff = (entry_time - current_time).total_seconds() / 60

            if time_diff > self.time_window_minutes or len(current_chunk) >= self.max_entries_per_chunk:
                if current_chunk:
                    chunks.append(_emit(current_chunk))
                current_chunk = [entry]
                current_time = entry_time
            else:
                current_chunk.append(entry)

        # Flush the trailing window.
        if current_chunk:
            chunks.append(_emit(current_chunk))

        return chunks

    def _parse_timestamp(self, timestamp_str: str) -> datetime:
        """Parse a timestamp string against common formats.

        Falls back to the current time when no format matches.
        """
        formats = [
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d %H:%M:%S.%f',
            '%d/%b/%Y:%H:%M:%S %z',
            '%Y-%m-%dT%H:%M:%S',
            '%Y-%m-%dT%H:%M:%S.%f',
            '%Y-%m-%dT%H:%M:%SZ',
            '%Y-%m-%dT%H:%M:%S.%fZ',
        ]

        for fmt in formats:
            try:
                return datetime.strptime(timestamp_str, fmt)
            except ValueError:
                continue

        return datetime.now()

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk log text by time window or by entry count, per configuration."""
        self.clear()

        if not text:
            return self.chunks

        if self.chunk_by_time:
            chunks = self.chunk_logs_by_time(text)
        else:
            chunks = self.chunk_logs_by_count(text)

        self.chunks = chunks
        return chunks


class CodeDocumentationChunker(BasicChunker):
    """Chunker for Python-style source files.

    Extracts docstrings, comments and function bodies as separate chunks;
    when none are found, falls back to size-bounded line groups.
    """

    def __init__(self, max_chunk_size: int = 1500,
                 separate_comments: bool = True,
                 chunk_by_functions: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.separate_comments = separate_comments
        self.chunk_by_functions = chunk_by_functions

    def extract_docstring(self, text: str) -> List[Chunk]:
        """Extract triple-quoted docstrings, each reported exactly once."""
        chunks = []
        seen_spans: List[Tuple[int, int]] = []

        # Closed forms first, then unclosed (runs to end of text).
        docstring_patterns = [
            r'""".*?"""',  # closed triple double quotes
            r"'''.*?'''",  # closed triple single quotes
            r'""".*?$',    # unclosed triple double quotes
            r"'''.*?$",    # unclosed triple single quotes
        ]

        for pattern in docstring_patterns:
            for match in re.finditer(pattern, text, re.DOTALL):
                span = (match.start(), match.end())
                # FIX: with re.DOTALL the "unclosed" patterns also match
                # every already-closed docstring through to end of text,
                # producing duplicate overlapping chunks; skip any match
                # overlapping a span we already reported.
                if any(s < span[1] and span[0] < e for s, e in seen_spans):
                    continue
                seen_spans.append(span)

                docstring = match.group()
                chunks.append(Chunk(
                    text=docstring,
                    start_index=match.start(),
                    end_index=match.end(),
                    chunk_type='docstring',
                    metadata={
                        'docstring_type': 'triple_quotes',
                        'length': len(docstring)
                    }
                ))

        return chunks

    def extract_comments(self, text: str) -> List[Chunk]:
        """Extract ``#`` comments and triple-quoted blocks as comment chunks."""
        if not self.separate_comments:
            return []

        chunks = []

        # Single-line comments.
        # NOTE(review): this regex also matches '#' inside string literals;
        # acceptable for a demo, a real tokenizer would be needed otherwise.
        for match in re.finditer(r'#.*$', text, re.MULTILINE):
            comment = match.group()
            chunks.append(Chunk(
                text=comment,
                start_index=match.start(),
                end_index=match.end(),
                chunk_type='comment',
                metadata={
                    'comment_type': 'single_line',
                    'length': len(comment)
                }
            ))

        # Triple-quoted blocks treated as multiline comments (simplified;
        # these overlap what extract_docstring reports, as in the original
        # design).
        for match in re.finditer(r'""".*?"""', text, re.DOTALL):
            comment = match.group()
            chunks.append(Chunk(
                text=comment,
                start_index=match.start(),
                end_index=match.end(),
                chunk_type='comment',
                metadata={
                    'comment_type': 'multiline',
                    'length': len(comment)
                }
            ))

        return chunks

    def extract_functions(self, text: str) -> List[Chunk]:
        """Extract each ``def`` and its indented body as one chunk.

        Body extent is determined by indentation only (simplified).
        NOTE(review): the signature regex does not handle multi-line
        signatures or defaults containing ')'.
        """
        if not self.chunk_by_functions:
            return []

        chunks = []

        for match in re.finditer(r'def\s+(\w+)\s*\([^)]*\)\s*:', text):
            func_start = match.start()
            func_name = match.group(1)

            # Collect the def line plus every following line that is blank
            # or indented deeper than the def itself.
            lines = text[func_start:].split('\n')
            func_lines = []
            indent_level = None

            for line in lines:
                if line.strip() == '':
                    func_lines.append(line)
                    continue

                current_indent = len(line) - len(line.lstrip())

                if indent_level is None:
                    # First non-blank line is the def line itself.
                    indent_level = current_indent
                    func_lines.append(line)
                elif current_indent > indent_level:
                    func_lines.append(line)
                else:
                    break

            func_text = '\n'.join(func_lines)

            chunks.append(Chunk(
                text=func_text,
                start_index=func_start,
                end_index=func_start + len(func_text),
                chunk_type='function',
                metadata={
                    'function_name': func_name,
                    'line_count': len(func_lines),
                    'has_docstring': '"""' in func_text or "'''" in func_text
                }
            ))

        return chunks

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk source text into docstrings, comments and functions.

        Falls back to size-bounded line groups when no structure is found.
        """
        self.clear()

        if not text:
            return self.chunks

        chunks = []
        chunks.extend(self.extract_docstring(text))
        chunks.extend(self.extract_comments(text))
        chunks.extend(self.extract_functions(text))

        # Fallback: no structure detected, group lines up to max_chunk_size.
        if not chunks:
            lines = text.split('\n')
            current_chunk = []
            current_start = 0

            for line in lines:
                current_chunk.append(line)

                if len('\n'.join(current_chunk)) > self.max_chunk_size:
                    chunk_text = '\n'.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='code_lines',
                        metadata={'line_count': len(current_chunk)}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 1  # +1 for the newline

            if current_chunk:
                chunk_text = '\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='code_lines',
                    metadata={'line_count': len(current_chunk)}
                ))

        self.chunks = chunks
        return chunks


def _print_section(title: str) -> None:
    """Print the standard 40-character demo section banner."""
    print("\n" + "=" * 40)
    print(title)
    print("=" * 40)


def main():
    """Run the special-format chunking demos (tables, JSON, XML, logs, code)."""
    print("=" * 60)
    print("特殊格式分块方法演示")
    print("=" * 60)

    # 1. Table chunking
    _print_section("1. 表格分块演示")

    # CSV table
    csv_table = """name,age,city,occupation
Alice,25,New York,Engineer
Bob,30,Los Angeles,Doctor
Charlie,35,Chicago,Teacher
David,28,Houston,Developer
Eve,32,Phoenix,Designer
Frank,29,Philadelphia,Manager
Grace,31,San Antonio,Consultant
Henry,27,San Diego,Architect"""

    csv_chunker = TableChunker(max_rows_per_chunk=3)
    csv_chunker.chunk(csv_table)
    csv_chunker.print_chunks(max_chunks=3)

    # Markdown table
    markdown_table = """| 姓名 | 年龄 | 城市 | 职业 |
|------|------|------|------|
| Alice | 25 | New York | Engineer |
| Bob | 30 | Los Angeles | Doctor |
| Charlie | 35 | Chicago | Teacher |
| David | 28 | Houston | Developer |
| Eve | 32 | Phoenix | Designer |
| Frank | 29 | Philadelphia | Manager |
| Grace | 31 | San Antonio | Consultant |
| Henry | 27 | San Diego | Architect |"""

    md_chunker = TableChunker(max_rows_per_chunk=3)
    md_chunker.chunk(markdown_table)
    md_chunker.print_chunks(max_chunks=2)

    # 2. JSON chunking
    _print_section("2. JSON分块演示")

    json_data = """
{
    "users": [
        {
            "id": 1,
            "name": "Alice",
            "email": "alice@example.com",
            "profile": {
                "age": 25,
                "city": "New York",
                "interests": ["reading", "hiking", "photography"]
            }
        },
        {
            "id": 2,
            "name": "Bob",
            "email": "bob@example.com",
            "profile": {
                "age": 30,
                "city": "Los Angeles",
                "interests": ["music", "travel", "cooking"]
            }
        }
    ],
    "settings": {
        "theme": "dark",
        "language": "en",
        "notifications": true
    },
    "version": "1.0.0"
}"""

    json_chunker = JSONChunker(chunk_by_structure=True)
    json_chunker.chunk(json_data)
    json_chunker.print_chunks(max_chunks=3)

    # 3. XML chunking
    _print_section("3. XML分块演示")

    xml_data = """<?xml version="1.0" encoding="UTF-8"?>
<library>
    <book id="1">
        <title>Python Programming</title>
        <author>John Doe</author>
        <year>2023</year>
        <price>29.99</price>
        <categories>
            <category>Programming</category>
            <category>Python</category>
        </categories>
    </book>
    <book id="2">
        <title>Machine Learning Basics</title>
        <author>Jane Smith</author>
        <year>2023</year>
        <price>39.99</price>
        <categories>
            <category>Machine Learning</category>
            <category>AI</category>
        </categories>
    </book>
</library>"""

    xml_chunker = XMLChunker(chunk_by_elements=True)
    xml_chunker.chunk(xml_data)
    xml_chunker.print_chunks(max_chunks=3)

    # 4. Log chunking
    _print_section("4. 日志分块演示")

    log_data = """2024-01-01 10:00:00 [INFO] Application started
2024-01-01 10:00:01 [INFO] Loading configuration
2024-01-01 10:00:02 [INFO] Database connection established
2024-01-01 10:00:03 [DEBUG] Query executed: SELECT * FROM users
2024-01-01 10:00:04 [INFO] User authentication successful
2024-01-01 10:00:05 [WARNING] High memory usage detected
2024-01-01 10:00:06 [ERROR] Failed to connect to external service
2024-01-01 10:00:07 [INFO] Retrying connection...
2024-01-01 10:00:08 [INFO] Connection restored
2024-01-01 10:00:09 [INFO] Processing user request
2024-01-01 10:00:10 [DEBUG] Request processed successfully
2024-01-01 10:00:11 [INFO] Sending response
2024-01-01 10:00:12 [INFO] Request completed"""

    log_chunker = LogChunker(max_entries_per_chunk=5, chunk_by_time=True, time_window_minutes=5)
    log_chunker.chunk(log_data)
    log_chunker.print_chunks(max_chunks=3)

    # 5. Code-documentation chunking
    _print_section("5. 代码文档分块演示")

    code_data = '''"""
Machine Learning Utilities Module

This module provides various utility functions for machine learning tasks.
"""

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

class DataProcessor:
    """
    Data processing utilities for machine learning.
    
    This class provides methods for data preprocessing, cleaning,
    and transformation tasks commonly needed in ML workflows.
    """
    
    def __init__(self, random_state=42):
        """
        Initialize the DataProcessor.
        
        Args:
            random_state (int): Random seed for reproducibility
        """
        self.random_state = random_state
        np.random.seed(random_state)
    
    def normalize_data(self, X):
        """
        Normalize the input data using standard scaling.
        
        Args:
            X (np.ndarray): Input data to normalize
            
        Returns:
            np.ndarray: Normalized data
        """
        from sklearn.preprocessing import StandardScaler
        
        # This is a simple implementation
        scaler = StandardScaler()
        return scaler.fit_transform(X)
    
    def split_data(self, X, y, test_size=0.2):
        """
        Split data into training and testing sets.
        
        Args:
            X (np.ndarray): Feature data
            y (np.ndarray): Target data
            test_size (float): Proportion of test data
            
        Returns:
            tuple: (X_train, X_test, y_train, y_test)
        """
        return train_test_split(X, y, test_size=test_size, 
                               random_state=self.random_state)

def calculate_accuracy(y_true, y_pred):
    """
    Calculate accuracy score.
    
    Args:
        y_true (np.ndarray): True labels
        y_pred (np.ndarray): Predicted labels
        
    Returns:
        float: Accuracy score
    """
    return accuracy_score(y_true, y_pred)

# Utility functions
def load_data(filepath):
    """
    Load data from file.
    
    Args:
        filepath (str): Path to data file
        
    Returns:
        np.ndarray: Loaded data
    """
    # Implementation would go here
    pass
'''

    code_doc_chunker = CodeDocumentationChunker(max_chunk_size=500)
    code_doc_chunker.chunk(code_data)
    code_doc_chunker.print_chunks(max_chunks=3)

    # Persist all chunking results to JSON files.
    _print_section("保存分块结果")
    csv_chunker.save_chunks("csv_table_chunks.json")
    json_chunker.save_chunks("json_chunks.json")
    xml_chunker.save_chunks("xml_chunks.json")
    log_chunker.save_chunks("log_chunks.json")
    code_doc_chunker.save_chunks("code_doc_chunks.json")

    print("\n" + "=" * 60)
    print("特殊格式分块演示完成！")
    print("=" * 60)


# =============================================================================
# Additional document-format chunking methods
# =============================================================================

class YAMLChunker(BasicChunker):
    """YAML document chunker.

    Parses the document (via PyYAML when available, otherwise a minimal
    key/value fallback parser) and emits one chunk per leaf value,
    labelled with its dotted/indexed path.
    """

    def __init__(self, max_chunk_size: int = 1000, preserve_structure: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.preserve_structure = preserve_structure

        # PyYAML is optional; record availability instead of failing at import time.
        try:
            import yaml  # noqa: F401
            self.yaml_available = True
        except ImportError:
            print("警告: 未安装PyYAML库，YAML分块器将使用简单解析作为备选")
            self.yaml_available = False

    def parse_yaml_structure(self, text: str) -> Dict:
        """Parse YAML text into a Python structure.

        Uses ``yaml.safe_load`` when PyYAML is installed; any parse failure
        falls back to the simple line-based parser.
        """
        if not self.yaml_available:
            return self._simple_yaml_parse(text)

        try:
            import yaml
            return yaml.safe_load(text)
        except Exception as e:
            print(f"YAML解析错误: {e}")
            return self._simple_yaml_parse(text)

    def _simple_yaml_parse(self, text: str) -> Dict:
        """Minimal YAML fallback: top-level ``key: value`` pairs only.

        Multi-line values are joined with newlines; comments and blank
        lines are skipped. Nested mappings and list syntax are NOT
        understood by this fallback.
        """
        result = {}
        current_key = None
        current_value = []

        for line in text.split('\n'):
            line = line.strip()
            if not line or line.startswith('#'):
                continue

            # A "key:" or "key: value" line starts a new entry.
            if ':' in line and not line.startswith('-'):
                if current_key is not None:
                    result[current_key] = '\n'.join(current_value)

                parts = line.split(':', 1)
                current_key = parts[0].strip()
                value_part = parts[1].strip()

                if value_part:
                    # Inline value: the entry is complete immediately.
                    result[current_key] = value_part
                    current_key = None
                    current_value = []
                else:
                    current_value = []
            elif current_key is not None:
                current_value.append(line)

        # Flush the last pending entry.
        if current_key is not None:
            result[current_key] = '\n'.join(current_value)

        return result

    def chunk_by_structure(self, data, path: str = "") -> List[Chunk]:
        """Recursively flatten parsed YAML into one chunk per leaf value.

        Accepts dicts, lists, or scalars (``yaml.safe_load`` may return any
        of them). The previous implementation always called ``.items()``
        when recursing, so a list nested inside a list — or a top-level
        list/scalar document — raised AttributeError; this version handles
        all three shapes.
        """
        chunks = []

        if isinstance(data, dict):
            entries = [(f"{path}.{key}" if path else str(key), value)
                       for key, value in data.items()]
        elif isinstance(data, list):
            entries = [(f"{path}[{i}]", item) for i, item in enumerate(data)]
        else:
            # Bare scalar document.
            entries = [(path, data)]

        for entry_path, value in entries:
            if isinstance(value, (dict, list)):
                chunks.extend(self.chunk_by_structure(value, entry_path))
            else:
                chunk_text = f"{entry_path}: {value}"
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=0,  # exact source offsets are not recoverable from parsed YAML
                    end_index=len(chunk_text),
                    chunk_type='yaml',
                    metadata={
                        'yaml_path': entry_path,
                        'data_type': type(value).__name__,
                        'preserve_structure': self.preserve_structure
                    }
                ))

        return chunks

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk a YAML document by structure, with a line-based fallback."""
        self.clear()

        if not text:
            return self.chunks

        try:
            # Parse, flatten, then enforce the size limit on each chunk.
            yaml_data = self.parse_yaml_structure(text)
            chunks = self.chunk_by_structure(yaml_data)

            final_chunks = []
            for chunk in chunks:
                if len(chunk.text) > self.max_chunk_size:
                    final_chunks.extend(
                        self._split_large_chunk(chunk.text, chunk.start_index))
                else:
                    final_chunks.append(chunk)

            self.chunks = final_chunks
            return self.chunks

        except Exception as e:
            print(f"YAML分块错误: {e}")
            # Fallback: group raw lines under the size limit.
            chunks = []
            current_chunk = []
            current_start = 0

            for line in text.split('\n'):
                current_chunk.append(line)
                chunk_text = '\n'.join(current_chunk)

                if len(chunk_text) > self.max_chunk_size:
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='yaml_fallback',
                        metadata={'error': str(e)}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 1

            if current_chunk:
                chunk_text = '\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='yaml_fallback',
                    metadata={'error': str(e)}
                ))

            self.chunks = chunks
            return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Split an oversized chunk on word boundaries under max_chunk_size."""
        sub_chunks = []
        current_chunk = []
        current_length = 0
        current_start = start_pos

        for word in chunk_text.split():
            # +1 for the joining space that will precede the word.
            if current_length + len(word) + 1 > self.max_chunk_size and current_chunk:
                sub_chunk_text = ' '.join(current_chunk)
                sub_chunks.append(Chunk(
                    text=sub_chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_chunk_text),
                    chunk_type='yaml',
                    metadata={'is_sub_chunk': True}
                ))
                current_chunk = []
                current_start += len(sub_chunk_text) + 1
                current_length = 0

            current_chunk.append(word)
            current_length += len(word) + 1

        if current_chunk:
            sub_chunk_text = ' '.join(current_chunk)
            sub_chunks.append(Chunk(
                text=sub_chunk_text,
                start_index=current_start,
                end_index=current_start + len(sub_chunk_text),
                chunk_type='yaml',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


class LaTeXChunker(BasicChunker):
    """LaTeX document chunker: splits along sectioning commands."""

    def __init__(self, max_chunk_size: int = 2000, preserve_structure: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.preserve_structure = preserve_structure

    def parse_latex_structure(self, text: str) -> List[Dict]:
        """Parse the document into a list of sectioning units.

        Each unit records its type (part/chapter/section/subsection/
        subsubsection), title, heading line number, and the raw content up
        to the next heading. NOTE(review): text before the first sectioning
        command is dropped, matching the previous behavior — confirm this
        is intended.
        """
        structure = []
        current_section = None
        current_content = []

        # Explicit (pattern, type) pairs. The old code derived the type
        # with substring tests ('section' in pattern), which misclassified
        # \subsection and \subsubsection as plain 'section'.
        section_patterns = [
            (r'\\subsubsection{([^}]+)}', 'subsubsection'),
            (r'\\subsection{([^}]+)}', 'subsection'),
            (r'\\section{([^}]+)}', 'section'),
            (r'\\chapter{([^}]+)}', 'chapter'),
            (r'\\part{([^}]+)}', 'part'),
        ]

        for line_num, line in enumerate(text.split('\n')):
            line = line.strip()

            # Check whether this line is a sectioning heading.
            section_match = None
            section_type = None
            for pattern, kind in section_patterns:
                match = re.match(pattern, line)
                if match:
                    section_match = match
                    section_type = kind
                    break

            if section_match:
                # Close out the previous section before starting a new one.
                if current_section is not None:
                    current_section['content'] = '\n'.join(current_content)
                    structure.append(current_section)

                current_section = {
                    'type': section_type,
                    'title': section_match.group(1),
                    'line_number': line_num,
                    'content': []
                }
                current_content = []
            elif current_section is not None:
                current_content.append(line)

        # Flush the final section.
        if current_section is not None:
            current_section['content'] = '\n'.join(current_content)
            structure.append(current_section)

        return structure

    def chunk_by_structure(self, structure: List[Dict]) -> List[Chunk]:
        """Turn each parsed section into one chunk (or several parts if large)."""
        chunks = []

        for section in structure:
            content = section['content']

            if len(content) > self.max_chunk_size:
                # Oversized section: split and label each part.
                sub_chunks = self._split_latex_content(content)
                for i, sub_content in enumerate(sub_chunks):
                    chunk_text = f"% {section['type']}: {section['title']} (Part {i+1})\n{sub_content}"
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=0,  # exact source offsets are not tracked for LaTeX
                        end_index=len(chunk_text),
                        chunk_type='latex',
                        metadata={
                            'section_type': section['type'],
                            'section_title': section['title'],
                            'line_number': section['line_number'],
                            'part_number': i + 1,
                            'preserve_structure': self.preserve_structure
                        }
                    ))
            else:
                chunk_text = f"% {section['type']}: {section['title']}\n{content}"
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=0,
                    end_index=len(chunk_text),
                    chunk_type='latex',
                    metadata={
                        'section_type': section['type'],
                        'section_title': section['title'],
                        'line_number': section['line_number'],
                        'preserve_structure': self.preserve_structure
                    }
                ))

        return chunks

    def _split_latex_content(self, content: str) -> List[str]:
        """Split LaTeX content: by paragraph, then sentence, then words."""
        paragraphs = content.split('\n\n')
        chunks = []
        current_chunk = []
        current_length = 0

        for para in paragraphs:
            if current_length + len(para) + 2 > self.max_chunk_size:
                if current_chunk:
                    chunks.append('\n\n'.join(current_chunk))
                    current_chunk = []
                    current_length = 0
                else:
                    # A single paragraph already exceeds the limit: split by sentence.
                    sentences = re.split(r'[.!?]', para)
                    for sentence in sentences:
                        sentence = sentence.strip()
                        # Very short fragments (<= 10 chars) are discarded.
                        if sentence and len(sentence) > 10:
                            if len(sentence) > self.max_chunk_size:
                                # Sentence still too long: split by words.
                                word_chunk = []
                                word_length = 0
                                for word in sentence.split():
                                    if word_length + len(word) + 1 > self.max_chunk_size:
                                        if word_chunk:
                                            chunks.append(' '.join(word_chunk))
                                            word_chunk = []
                                            word_length = 0
                                    word_chunk.append(word)
                                    word_length += len(word) + 1
                                if word_chunk:
                                    chunks.append(' '.join(word_chunk))
                            else:
                                chunks.append(sentence)
                    continue

            current_chunk.append(para)
            current_length += len(para) + 2

        if current_chunk:
            chunks.append('\n\n'.join(current_chunk))

        return chunks

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk a LaTeX document by structure, with a line-based fallback."""
        self.clear()

        if not text:
            return self.chunks

        try:
            structure = self.parse_latex_structure(text)

            if not structure:
                # No sectioning commands found: chunk the document as a whole.
                if len(text) > self.max_chunk_size:
                    parts = self._split_latex_content(text)
                    final_chunks = []
                    for i, chunk_content in enumerate(parts):
                        final_chunks.append(Chunk(
                            text=chunk_content,
                            start_index=0,
                            end_index=len(chunk_content),
                            chunk_type='latex',
                            metadata={
                                'no_structure': True,
                                'part_number': i + 1,
                                'preserve_structure': self.preserve_structure
                            }
                        ))
                    self.chunks = final_chunks
                    return self.chunks
                else:
                    self.chunks = [Chunk(
                        text=text,
                        start_index=0,
                        end_index=len(text),
                        chunk_type='latex',
                        metadata={'preserve_structure': self.preserve_structure}
                    )]
                    return self.chunks

            self.chunks = self.chunk_by_structure(structure)
            return self.chunks

        except Exception as e:
            print(f"LaTeX分块错误: {e}")
            # Fallback: group raw lines under the size limit.
            chunks = []
            current_chunk = []
            current_start = 0

            for line in text.split('\n'):
                current_chunk.append(line)
                chunk_text = '\n'.join(current_chunk)

                if len(chunk_text) > self.max_chunk_size:
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='latex_fallback',
                        metadata={'error': str(e)}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 1

            if current_chunk:
                chunk_text = '\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='latex_fallback',
                    metadata={'error': str(e)}
                ))

            self.chunks = chunks
            return self.chunks


class PDFChunker(BasicChunker):
    """PDF document chunker.

    Extracts text with PyPDF2 and chunks it either per page or by
    paragraph/sentence content under ``max_chunk_size``.
    """

    def __init__(self, max_chunk_size: int = 2000, chunk_by_page: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.chunk_by_page = chunk_by_page

        # PyPDF2 is optional; record availability instead of failing at import time.
        try:
            import PyPDF2  # noqa: F401
            self.pypdf2_available = True
        except ImportError:
            print("警告: 未安装PyPDF2库，PDF分块器将不可用")
            self.pypdf2_available = False

    def extract_text_from_pdf(self, pdf_content) -> str:
        """Extract text from a PDF given a file path (str) or raw bytes.

        Raises ImportError when PyPDF2 is missing; re-raises extraction
        failures after logging them.
        """
        if not self.pypdf2_available:
            raise ImportError("PyPDF2库未安装")

        import PyPDF2
        import io

        text = ""

        try:
            if isinstance(pdf_content, str):
                # A string argument is treated as a file path.
                with open(pdf_content, 'rb') as file:
                    pdf_reader = PyPDF2.PdfReader(file)
                    for page in pdf_reader.pages:
                        text += page.extract_text() + "\n"
            else:
                # Anything else is treated as an in-memory byte stream.
                pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_content))
                for page in pdf_reader.pages:
                    text += page.extract_text() + "\n"

            return text
        except Exception as e:
            print(f"PDF文本提取错误: {e}")
            raise

    def chunk_by_pages(self, text: str) -> List[Chunk]:
        """Chunk extracted text per page, splitting oversized pages further.

        NOTE(review): pages are assumed to be separated by form-feed
        characters, but extract_text_from_pdf joins pages with newlines, so
        this usually sees the whole document as one page — confirm the
        intended page delimiter. Offsets are approximate (based on the
        stripped page text).
        """
        pages = text.split('\f')  # form-feed page separator

        chunks = []
        current_start = 0

        for i, page_text in enumerate(pages):
            page_text = page_text.strip()
            if not page_text:
                continue

            if len(page_text) > self.max_chunk_size:
                # Page too large: fall back to sentence/word splitting.
                chunks.extend(self._split_large_chunk(page_text, current_start))
            else:
                chunks.append(Chunk(
                    text=page_text,
                    start_index=current_start,
                    end_index=current_start + len(page_text),
                    chunk_type='pdf',
                    metadata={
                        'page_number': i + 1,
                        'chunk_by_page': self.chunk_by_page
                    }
                ))

            current_start += len(page_text) + 1

        return chunks

    def chunk_by_content(self, text: str) -> List[Chunk]:
        """Chunk extracted text by paragraphs under ``max_chunk_size``."""
        paragraphs = text.split('\n\n')
        chunks = []
        current_chunk = []
        current_start = 0

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            if len('\n\n'.join(current_chunk + [para])) > self.max_chunk_size:
                # Flush the accumulated paragraphs first.
                if current_chunk:
                    chunk_text = '\n\n'.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='pdf',
                        metadata={'chunk_by_page': False}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 2

                if len(para) > self.max_chunk_size:
                    # Single paragraph exceeds the limit: split it further.
                    chunks.extend(self._split_large_chunk(para, current_start))
                    current_start += len(para) + 2
                else:
                    current_chunk = [para]
            else:
                current_chunk.append(para)

        # Flush any remaining paragraphs.
        if current_chunk:
            chunk_text = '\n\n'.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='pdf',
                metadata={'chunk_by_page': False}
            ))

        return chunks

    def chunk(self, pdf_input) -> List[Chunk]:
        """Chunk a PDF (path or bytes) by page or by content.

        Returns an empty chunk list when PyPDF2 is unavailable or
        extraction fails (errors are printed, not raised).
        """
        self.clear()

        if not self.pypdf2_available:
            print("错误: PyPDF2库未安装，无法进行PDF分块")
            return self.chunks

        try:
            text = self.extract_text_from_pdf(pdf_input)

            if self.chunk_by_page:
                chunks = self.chunk_by_pages(text)
            else:
                chunks = self.chunk_by_content(text)

            self.chunks = chunks
            return self.chunks

        except Exception as e:
            print(f"PDF分块错误: {e}")
            return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Split oversized text on sentence boundaries, then words if needed.

        Fixes an offset bug in the original: ``current_start`` was advanced
        AFTER clearing the word buffer, so it advanced by 1 instead of the
        emitted chunk's length, corrupting all subsequent offsets. Also
        avoids reassigning the ``chunk_text`` parameter.
        """
        sentences = re.split(r'[.!?]', chunk_text)
        chunks = []
        current_chunk = []
        current_start = start_pos

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            if len('. '.join(current_chunk + [sentence])) > self.max_chunk_size:
                # Flush the accumulated sentences first.
                if current_chunk:
                    flushed = '. '.join(current_chunk) + '.'
                    chunks.append(Chunk(
                        text=flushed,
                        start_index=current_start,
                        end_index=current_start + len(flushed),
                        chunk_type='pdf',
                        metadata={'is_sub_chunk': True}
                    ))
                    current_chunk = []
                    current_start += len(flushed) + 1

                if len(sentence) > self.max_chunk_size:
                    # Sentence alone exceeds the limit: split by words.
                    word_chunk = []
                    word_length = 0
                    for word in sentence.split():
                        if word_length + len(word) + 1 > self.max_chunk_size and word_chunk:
                            piece = ' '.join(word_chunk)
                            chunks.append(Chunk(
                                text=piece,
                                start_index=current_start,
                                end_index=current_start + len(piece),
                                chunk_type='pdf',
                                metadata={'is_sub_chunk': True}
                            ))
                            # Advance by the emitted piece BEFORE clearing the buffer.
                            current_start += len(piece) + 1
                            word_chunk = []
                            word_length = 0
                        word_chunk.append(word)
                        word_length += len(word) + 1
                    if word_chunk:
                        piece = ' '.join(word_chunk)
                        chunks.append(Chunk(
                            text=piece,
                            start_index=current_start,
                            end_index=current_start + len(piece),
                            chunk_type='pdf',
                            metadata={'is_sub_chunk': True}
                        ))
                        current_start += len(piece) + 1
                else:
                    current_chunk = [sentence]
            else:
                current_chunk.append(sentence)

        # Flush any remaining sentences.
        if current_chunk:
            flushed = '. '.join(current_chunk) + '.'
            chunks.append(Chunk(
                text=flushed,
                start_index=current_start,
                end_index=current_start + len(flushed),
                chunk_type='pdf',
                metadata={'is_sub_chunk': True}
            ))

        return chunks


class DOCXChunker(BasicChunker):
    """DOCX document chunker.

    Extracts plain text from a .docx file (paragraph bodies plus table
    cells) and splits it into chunks of at most ``max_chunk_size``
    characters, preferring paragraph boundaries, then sentence
    boundaries, then word boundaries.
    """

    def __init__(self, max_chunk_size: int = 2000, preserve_formatting: bool = False):
        super().__init__()
        self.max_chunk_size = max_chunk_size            # upper bound, in characters, per chunk
        self.preserve_formatting = preserve_formatting  # recorded in chunk metadata only

        # Probe the optional python-docx dependency once at construction time.
        try:
            from docx import Document  # noqa: F401 -- availability probe only
            self.docx_available = True
        except ImportError:
            print("警告: 未安装python-docx库，DOCX分块器将不可用")
            self.docx_available = False

    def extract_text_from_docx(self, docx_path: str) -> str:
        """Extract text from a DOCX file.

        Paragraphs are newline-terminated; table cells are tab-separated
        with one table row per line, appended after the running text.

        Raises:
            ImportError: if python-docx is not installed.
        """
        if not self.docx_available:
            raise ImportError("python-docx库未安装")

        from docx import Document

        try:
            doc = Document(docx_path)
            # Collect pieces and join once instead of quadratic str +=.
            parts = []

            for paragraph in doc.paragraphs:
                parts.append(paragraph.text + "\n")

            # Table contents follow the paragraph text.
            for table in doc.tables:
                for row in table.rows:
                    for cell in row.cells:
                        parts.append(cell.text + "\t")
                    parts.append("\n")

            return "".join(parts)
        except Exception as e:
            print(f"DOCX文本提取错误: {e}")
            raise

    def chunk_by_paragraphs(self, text: str) -> List[Chunk]:
        """Split extracted DOCX text into chunks along paragraph boundaries.

        Paragraphs accumulate until adding the next one would exceed
        ``max_chunk_size``; a single oversized paragraph is delegated to
        ``_split_large_chunk``.
        """
        paragraphs = text.split('\n')
        chunks = []
        current_chunk = []
        current_start = 0

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            if len('\n'.join(current_chunk + [para])) > self.max_chunk_size:
                if current_chunk:
                    chunk_text = '\n'.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='docx',
                        metadata={
                            'preserve_formatting': self.preserve_formatting,
                            'chunk_type': 'paragraph'
                        }
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 1

                # A single paragraph larger than the limit is split further.
                if len(para) > self.max_chunk_size:
                    chunks.extend(self._split_large_chunk(para, current_start))
                    current_start += len(para) + 1
                else:
                    current_chunk = [para]
            else:
                current_chunk.append(para)

        # Flush whatever is left over.
        if current_chunk:
            chunk_text = '\n'.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='docx',
                metadata={
                    'preserve_formatting': self.preserve_formatting,
                    'chunk_type': 'paragraph'
                }
            ))

        return chunks

    def chunk(self, docx_path: str) -> List[Chunk]:
        """Chunk a DOCX document given its file path.

        Returns the accumulated chunk list; on any failure the chunks
        collected so far (possibly empty) are returned.
        """
        self.clear()

        if not self.docx_available:
            print("错误: python-docx库未安装，无法进行DOCX分块")
            return self.chunks

        try:
            text = self.extract_text_from_docx(docx_path)
            self.chunks = self.chunk_by_paragraphs(text)
            return self.chunks
        except Exception as e:
            print(f"DOCX分块错误: {e}")
            return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Split an oversized paragraph on sentence, then word, boundaries.

        Offsets are tracked relative to ``start_pos``; every emitted
        chunk is tagged ``is_sub_chunk`` in its metadata.
        """
        sentences = re.split(r'[.!?]', chunk_text)
        chunks = []
        current_chunk = []
        current_start = start_pos

        def emit(text: str) -> None:
            # Append a sub-chunk and advance the running offset past it
            # (+1 for the separator consumed between chunks).
            nonlocal current_start
            chunks.append(Chunk(
                text=text,
                start_index=current_start,
                end_index=current_start + len(text),
                chunk_type='docx',
                metadata={'is_sub_chunk': True}
            ))
            current_start += len(text) + 1

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            if len('. '.join(current_chunk + [sentence])) > self.max_chunk_size:
                if current_chunk:
                    emit('. '.join(current_chunk) + '.')
                    current_chunk = []

                if len(sentence) > self.max_chunk_size:
                    # Sentence alone exceeds the limit: fall back to words.
                    word_chunk = []
                    word_length = 0
                    for word in sentence.split():
                        if word_length + len(word) + 1 > self.max_chunk_size and word_chunk:
                            # Fix: emit() reads word_chunk *before* it is
                            # cleared -- the original cleared first, so the
                            # offset only advanced by 1 per emitted piece.
                            emit(' '.join(word_chunk))
                            word_chunk = []
                            word_length = 0
                        word_chunk.append(word)
                        word_length += len(word) + 1
                    if word_chunk:
                        # Fix: also advance the offset past the final piece
                        # so following sentences do not reuse a stale start.
                        emit(' '.join(word_chunk))
                else:
                    current_chunk = [sentence]
            else:
                current_chunk.append(sentence)

        # Flush the remaining sentences.
        if current_chunk:
            emit('. '.join(current_chunk) + '.')

        return chunks


class EmailChunker(BasicChunker):
    """Email thread chunker.

    Splits email-thread text either per message (headers / body /
    signature become separate chunks) or by plain content paragraphs,
    keeping each chunk within ``max_chunk_size`` characters.
    """

    def __init__(self, max_chunk_size: int = 2000, chunk_by_thread: bool = True):
        super().__init__()
        self.max_chunk_size = max_chunk_size    # upper bound, in characters, per chunk
        self.chunk_by_thread = chunk_by_thread  # True: per-message chunks; False: content chunks

    def parse_email_structure(self, text: str) -> List[Dict]:
        """Locate individual messages inside a thread.

        Tries several delimiter patterns (plain header blocks, forwarded
        "Original Message" sections, "On ... wrote:" replies).  Falls
        back to treating the whole text as a single message.
        """
        emails = []

        email_patterns = [
            r'From:.*?Subject:.*?(?=From:|$)',  # standard header block
            r'-----Original Message-----.*?(?=-----Original Message-----|$)',  # forwarded mail
            r'On.*wrote:.*?(?=On.*wrote:|$)'  # quoted reply
        ]

        for pattern in email_patterns:
            for match in re.finditer(pattern, text, re.DOTALL):
                email_text = match.group().strip()
                if email_text:
                    emails.append({
                        'text': email_text,
                        'start_pos': match.start(),
                        'end_pos': match.end()
                    })

        # No delimiter matched: the whole text is one message.
        if not emails:
            emails.append({
                'text': text,
                'start_pos': 0,
                'end_pos': len(text)
            })

        # Fix: patterns are matched independently, so results could come
        # out in pattern order rather than document order -- restore
        # positional order for stable, readable output.
        emails.sort(key=lambda e: e['start_pos'])
        return emails

    def extract_email_parts(self, email_text: str) -> Dict:
        """Split one message into headers, body, and signature strings.

        Detection is heuristic: header lines match ``From:/To:/...``
        prefixes, the signature starts at ``--`` or a common closing
        phrase, and everything in between is body.
        """
        parts = {
            'headers': '',
            'body': '',
            'signature': '',
            'attachments': []
        }

        lines = email_text.split('\n')
        in_body = False
        in_signature = False

        for line in lines:
            line = line.strip()

            # Header line (only before the body starts).
            if not in_body and re.match(r'^(From|To|Subject|Date|Cc|Bcc):', line):
                parts['headers'] += line + '\n'
            # First non-header, non-empty line begins the body.
            elif not in_body and line and not re.match(r'^(From|To|Subject|Date|Cc|Bcc):', line):
                in_body = True
                parts['body'] += line + '\n'
            # Signature markers switch all remaining lines to the signature.
            elif in_body and (line.startswith('--') or line.startswith('Best regards') or
                             line.startswith('Sincerely') or line.startswith('Thanks')):
                in_signature = True
                parts['signature'] += line + '\n'
            elif in_signature:
                parts['signature'] += line + '\n'
            elif in_body:
                parts['body'] += line + '\n'

        return parts

    def chunk_by_emails(self, emails: List[Dict]) -> List[Chunk]:
        """Produce header/body/signature chunks for each parsed message.

        NOTE(review): start/end indices for body and signature are
        approximations derived from the stripped part lengths, not exact
        offsets into the original text.
        """
        chunks = []

        for email in emails:
            email_parts = self.extract_email_parts(email['text'])

            # Header chunk.
            if email_parts['headers']:
                chunks.append(Chunk(
                    text=email_parts['headers'],
                    start_index=email['start_pos'],
                    end_index=email['start_pos'] + len(email_parts['headers']),
                    chunk_type='email_headers',
                    metadata={
                        'email_type': 'headers',
                        'chunk_by_thread': self.chunk_by_thread
                    }
                ))

            # Body chunk(s); oversized bodies are split further.
            if email_parts['body']:
                body_text = email_parts['body'].strip()
                body_start = email['start_pos'] + len(email_parts['headers'])
                if len(body_text) > self.max_chunk_size:
                    chunks.extend(self._split_large_chunk(body_text, body_start))
                else:
                    chunks.append(Chunk(
                        text=body_text,
                        start_index=body_start,
                        end_index=body_start + len(body_text),
                        chunk_type='email_body',
                        metadata={
                            'email_type': 'body',
                            'chunk_by_thread': self.chunk_by_thread
                        }
                    ))

            # Signature chunk, anchored to the end of the message.
            if email_parts['signature']:
                chunks.append(Chunk(
                    text=email_parts['signature'],
                    start_index=email['end_pos'] - len(email_parts['signature']),
                    end_index=email['end_pos'],
                    chunk_type='email_signature',
                    metadata={
                        'email_type': 'signature',
                        'chunk_by_thread': self.chunk_by_thread
                    }
                ))

        return chunks

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk an email thread.

        Uses per-message chunking when ``chunk_by_thread`` is True,
        content chunking otherwise; on a parsing error, falls back to a
        simple line-accumulating split tagged ``email_fallback``.
        """
        self.clear()

        if not text:
            return self.chunks

        try:
            emails = self.parse_email_structure(text)

            if self.chunk_by_thread:
                chunks = self.chunk_by_emails(emails)
            else:
                chunks = self._chunk_by_content(text)

            self.chunks = chunks
            return self.chunks

        except Exception as e:
            print(f"邮件分块错误: {e}")
            # Fallback: accumulate raw lines up to the size limit.
            lines = text.split('\n')
            chunks = []
            current_chunk = []
            current_start = 0

            for line in lines:
                current_chunk.append(line)

                if len('\n'.join(current_chunk)) > self.max_chunk_size:
                    chunk_text = '\n'.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='email_fallback',
                        metadata={'error': str(e)}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 1

            if current_chunk:
                chunk_text = '\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='email_fallback',
                    metadata={'error': str(e)}
                ))

            self.chunks = chunks
            return self.chunks

    def _chunk_by_content(self, text: str) -> List[Chunk]:
        """Chunk email text by blank-line-separated paragraphs,
        ignoring message boundaries."""
        paragraphs = text.split('\n\n')
        chunks = []
        current_chunk = []
        current_start = 0

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            if len('\n\n'.join(current_chunk + [para])) > self.max_chunk_size:
                if current_chunk:
                    chunk_text = '\n\n'.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='email_content',
                        metadata={'chunk_by_thread': False}
                    ))
                    current_chunk = []
                    current_start += len(chunk_text) + 2

                # A single paragraph larger than the limit is split further.
                if len(para) > self.max_chunk_size:
                    chunks.extend(self._split_large_chunk(para, current_start))
                    current_start += len(para) + 2
                else:
                    current_chunk = [para]
            else:
                current_chunk.append(para)

        # Flush whatever is left over.
        if current_chunk:
            chunk_text = '\n\n'.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='email_content',
                metadata={'chunk_by_thread': False}
            ))

        return chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Split an oversized piece of text on sentence, then word,
        boundaries; emitted chunks are tagged ``is_sub_chunk``."""
        sentences = re.split(r'[.!?]', chunk_text)
        chunks = []
        current_chunk = []
        current_start = start_pos

        def emit(text: str) -> None:
            # Append a sub-chunk and advance the running offset past it
            # (+1 for the separator consumed between chunks).
            nonlocal current_start
            chunks.append(Chunk(
                text=text,
                start_index=current_start,
                end_index=current_start + len(text),
                chunk_type='email',
                metadata={'is_sub_chunk': True}
            ))
            current_start += len(text) + 1

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            if len('. '.join(current_chunk + [sentence])) > self.max_chunk_size:
                if current_chunk:
                    emit('. '.join(current_chunk) + '.')
                    current_chunk = []

                if len(sentence) > self.max_chunk_size:
                    # Sentence alone exceeds the limit: fall back to words.
                    word_chunk = []
                    word_length = 0
                    for word in sentence.split():
                        if word_length + len(word) + 1 > self.max_chunk_size and word_chunk:
                            # Fix: emit() reads word_chunk *before* it is
                            # cleared -- the original cleared first, so the
                            # offset only advanced by 1 per emitted piece.
                            emit(' '.join(word_chunk))
                            word_chunk = []
                            word_length = 0
                        word_chunk.append(word)
                        word_length += len(word) + 1
                    if word_chunk:
                        # Fix: also advance the offset past the final piece
                        # so following sentences do not reuse a stale start.
                        emit(' '.join(word_chunk))
                else:
                    current_chunk = [sentence]
            else:
                current_chunk.append(sentence)

        # Flush the remaining sentences.
        if current_chunk:
            emit('. '.join(current_chunk) + '.')

        return chunks


if __name__ == "__main__":
    # Run the demo entry point; main() is presumably defined earlier in
    # this module (not visible in this section) -- verify it exists.
    main()