import markdown
from markdown.extensions import toc, tables, codehilite, fenced_code
from bs4 import BeautifulSoup
from typing import Dict, Any, List, Optional, Tuple
import re
import yaml

class MarkdownProcessor:
    """Markdown document processor.

    Extracts plain text, structure (headings / TOC / sections / outline),
    code blocks, tables, links and images from Markdown sources, with
    optional YAML front matter support.
    """

    def __init__(self) -> None:
        """Configure the shared Markdown-to-HTML parser and supported formats."""
        # File extensions this processor accepts.
        self.supported_formats = ['md', 'markdown']
        
        # Configure the Markdown parser. The instance is reused across
        # documents; _convert_to_html() calls reset() before each conversion.
        self.md = markdown.Markdown(
            extensions=[
                'toc',
                'tables', 
                'fenced_code',
                'codehilite',
                'attr_list',
                'def_list',
                'footnotes',
                'md_in_html'
            ],
            extension_configs={
                'toc': {
                    'permalink': True,  # add anchor links to headings
                    'baselevel': 1
                },
                'codehilite': {
                    'css_class': 'highlight'
                }
            }
        )
    
    def extract_content(self, file_content: bytes) -> Dict[str, Any]:
        """
        Extract content and structural information from a Markdown file.

        Args:
            file_content: Raw bytes of the Markdown file.

        Returns:
            Dict[str, Any]: Plain text plus raw/main/HTML content, metadata,
            front matter, structure, code blocks, tables, links and images.

        Raises:
            Exception: Wraps any underlying failure; the original exception
            is chained via ``from`` so the traceback is preserved.
        """
        try:
            # Decode raw bytes to text (tries several encodings).
            text_content = self._decode_content(file_content)

            # Split YAML front matter from the body.
            front_matter, main_content = self._extract_front_matter(text_content)

            # Basic metadata (title, author, word/line counts, ...).
            metadata = self._extract_metadata(text_content, front_matter)

            # Document structure: headings, TOC, sections, outline.
            structure = self._extract_structure(main_content)

            # Embedded elements.
            code_blocks = self._extract_code_blocks(main_content)
            tables = self._extract_tables(main_content)
            links = self._extract_links(main_content)
            images = self._extract_images(main_content)

            # HTML rendering (used by some downstream processing).
            html_content = self._convert_to_html(main_content)

            # Plain text with Markdown syntax stripped.
            plain_text = self._extract_plain_text(main_content)

            return {
                'text': plain_text,
                'raw_content': text_content,
                'main_content': main_content,
                'html_content': html_content,
                'metadata': metadata,
                'front_matter': front_matter,
                'structure': structure,
                'code_blocks': code_blocks,
                'tables': tables,
                'links': links,
                'images': images,
                'format': 'markdown'
            }

        except Exception as e:
            # Chain the original exception instead of discarding it.
            raise Exception(f"Markdown处理失败: {str(e)}") from e
    
    def _decode_content(self, file_content: bytes) -> str:
        """Decode raw file bytes to text, trying a sequence of likely encodings."""
        # NOTE: 'latin-1' can decode any byte sequence, so in practice the
        # loop always succeeds by its last candidate.
        for candidate in ('utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'latin-1'):
            try:
                return file_content.decode(candidate)
            except UnicodeDecodeError:
                continue

        # Defensive fallback: replace undecodable bytes instead of raising.
        return file_content.decode('utf-8', errors='replace')
    
    def _extract_front_matter(self, content: str) -> Tuple[Dict[str, Any], str]:
        """
        Split YAML front matter from the Markdown body.

        Returns:
            Tuple[Dict[str, Any], str]: (front matter mapping, remaining body).
            The mapping is empty when there is no front matter, when the YAML
            is invalid, or when it parses to a non-mapping value.
        """
        # Front matter is delimited by '---' lines at the very start.
        yaml_pattern = r'^---\s*\n(.*?)\n---\s*\n'
        match = re.match(yaml_pattern, content, re.DOTALL)
        if not match:
            return {}, content

        try:
            parsed = yaml.safe_load(match.group(1))
        except yaml.YAMLError:
            # Invalid YAML: treat the whole document as body content.
            return {}, content

        # safe_load may return scalars or lists; only accept a mapping so
        # downstream .get() calls are safe.
        if not isinstance(parsed, dict):
            parsed = {}
        return parsed, content[match.end():]
    
    def _extract_metadata(self, content: str, front_matter: Dict[str, Any]) -> Dict[str, Any]:
        """
        Build document metadata from front matter and simple text statistics.

        Args:
            content: Full Markdown text (including any front matter).
            front_matter: Parsed front matter mapping (may be empty).

        Returns:
            Dict[str, Any]: title/author/date/tags/categories/description plus
            word, line and character counts.
        """
        metadata = {
            'title': '',
            'author': '',
            'date': '',
            'tags': [],
            'categories': [],
            'description': '',
            'word_count': 0,
            'line_count': 0,
            'character_count': 0
        }

        # Front matter wins; guard against explicit YAML nulls (e.g. "date:")
        # which would otherwise overwrite defaults with None / 'None'.
        if front_matter:
            metadata.update({
                'title': front_matter.get('title') or '',
                'author': front_matter.get('author') or '',
                'date': str(front_matter.get('date') or ''),
                'tags': front_matter.get('tags') or [],
                'categories': front_matter.get('categories') or [],
                'description': front_matter.get('description') or ''
            })

        # Fall back to the first ATX heading for the title.
        if not metadata['title']:
            title_match = re.search(r'^#\s+(.+)$', content, re.MULTILINE)
            if title_match:
                metadata['title'] = title_match.group(1).strip()

        # Raw text statistics.
        metadata['line_count'] = len(content.split('\n'))
        metadata['character_count'] = len(content)

        # Word count on the Markdown-stripped text.
        plain_text = self._extract_plain_text(content)
        metadata['word_count'] = len(re.findall(r'\b\w+\b', plain_text))

        return metadata
    
    def _extract_structure(self, content: str) -> Dict[str, Any]:
        """
        Extract the document's heading-based structure.

        Returns:
            Dict[str, Any]: 'headings' (flat list), 'toc', 'sections' and a
            nested 'outline'. Lines inside ``` fenced code blocks are ignored
            so that e.g. shell comments are not mistaken for headings.
        """
        structure = {
            'headings': [],
            'sections': [],
            'toc': [],
            'outline': []
        }

        heading_pattern = r'^(#{1,6})\s+(.+)$'
        lines = content.split('\n')
        in_fence = False  # True while inside a ``` fenced code block

        for line_num, line in enumerate(lines, 1):
            stripped = line.strip()
            if stripped.startswith('```'):
                # Fence delimiter: toggle state, never a heading itself.
                in_fence = not in_fence
                continue
            if in_fence:
                continue
            match = re.match(heading_pattern, stripped)
            if match:
                title = match.group(2).strip()
                structure['headings'].append({
                    'level': len(match.group(1)),
                    'title': title,
                    'line_number': line_num,
                    'anchor': self._generate_anchor(title)
                })

        # Derived views of the heading list.
        structure['toc'] = self._build_toc(structure['headings'])
        structure['sections'] = self._build_sections(structure['headings'], lines)
        structure['outline'] = self._build_outline(structure['headings'])

        return structure
    
    def _generate_anchor(self, title: str) -> str:
        """Build a GitHub-style anchor slug: lowercase, words joined by hyphens."""
        # Drop everything except word characters, whitespace and hyphens,
        # then collapse runs of whitespace/hyphens into single hyphens.
        cleaned = re.sub(r'[^\w\s-]', '', title.lower())
        hyphenated = re.sub(r'[-\s]+', '-', cleaned)
        return hyphenated.strip('-')
    
    def _build_toc(self, headings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Project the heading list onto flat table-of-contents entries."""
        keys = ('title', 'level', 'anchor', 'line_number')
        return [{key: heading[key] for key in keys} for heading in headings]
    
    def _build_sections(self, headings: List[Dict[str, Any]], lines: List[str]) -> List[Dict[str, Any]]:
        """
        Slice the document into per-heading sections.

        A section runs from the line after its heading up to (but excluding)
        the next heading of the same or higher level, so subsections are
        included in their parent's content.
        """
        word_pattern = re.compile(r'\b\w+\b')
        sections = []

        for idx, heading in enumerate(headings):
            start = heading['line_number']

            # Default: section runs to the end of the document.
            end = len(lines)
            for later in headings[idx + 1:]:
                if later['level'] <= heading['level']:
                    end = later['line_number'] - 1
                    break

            # start is 1-based, so lines[start:end] skips the heading itself.
            body = '\n'.join(lines[start:end])
            sections.append({
                'title': heading['title'],
                'level': heading['level'],
                'start_line': start,
                'end_line': end,
                'content': body,
                'word_count': len(word_pattern.findall(body))
            })

        return sections
    
    def _build_outline(self, headings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Build a nested outline tree from the flat heading list.

        Each node carries a 'children' list; a heading becomes a child of the
        nearest preceding heading with a smaller level.
        """
        roots: List[Dict[str, Any]] = []
        ancestors: List[Dict[str, Any]] = []  # currently-open headings, shallowest first

        for heading in headings:
            node = {
                'title': heading['title'],
                'level': heading['level'],
                'anchor': heading['anchor'],
                'line_number': heading['line_number'],
                'children': []
            }

            # Close ancestors that are at the same depth or deeper.
            while ancestors and ancestors[-1]['level'] >= node['level']:
                ancestors.pop()

            # Attach to the nearest open ancestor, or to the root list.
            (ancestors[-1]['children'] if ancestors else roots).append(node)
            ancestors.append(node)

        return roots
    
    def _extract_code_blocks(self, content: str) -> List[Dict[str, Any]]:
        """
        Extract fenced (```lang) and indented (4-space/tab) code blocks.

        Returns:
            List[Dict[str, Any]]: fenced entries carry character offsets
            ('start_pos'/'end_pos'); indented entries carry 1-based line
            numbers ('start_line'/'end_line').
        """
        code_blocks: List[Dict[str, Any]] = []

        # Fenced blocks: ```lang ... ```
        for match in re.finditer(r'```(\w*)\n(.*?)\n```', content, re.DOTALL):
            code_blocks.append({
                'type': 'fenced',
                'language': match.group(1) or 'text',
                'code': match.group(2),
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        code_blocks.extend(self._extract_indented_code_blocks(content))
        return code_blocks

    def _extract_indented_code_blocks(self, content: str) -> List[Dict[str, Any]]:
        """
        Scan for indented (4 spaces or one tab) code blocks.

        Lines inside ``` fences are skipped so fenced content is not reported
        twice, and trailing blank lines are trimmed from each block.
        """
        blocks: List[Dict[str, Any]] = []
        lines = content.split('\n')
        in_fence = False
        current: List[str] = []  # dedented lines of the block being collected
        start_index = 0          # 0-based line index where the block starts

        def flush() -> None:
            # Emit the pending block, dropping trailing blank lines.
            trimmed = list(current)
            while trimmed and trimmed[-1] == '':
                trimmed.pop()
            if trimmed:
                blocks.append({
                    'type': 'indented',
                    'language': 'text',
                    'code': '\n'.join(trimmed),
                    'start_line': start_index + 1,
                    'end_line': start_index + len(trimmed)
                })
            current.clear()

        for index, line in enumerate(lines):
            stripped = line.strip()
            if stripped.startswith('```'):
                # Fence delimiter: close any pending block and toggle state.
                flush()
                in_fence = not in_fence
                continue
            if in_fence:
                continue
            if line.startswith('    ') or line.startswith('\t'):
                if not current:
                    start_index = index
                # Remove one level of indentation (4 spaces or a single tab).
                current.append(line[4:] if line.startswith('    ') else line[1:])
            elif current:
                if stripped == '':
                    # Blank lines may be interior to the block; kept for now,
                    # trimmed by flush() if they turn out to be trailing.
                    current.append('')
                else:
                    flush()

        # Handle a block that runs to the end of the document.
        flush()
        return blocks
    
    def _extract_tables(self, content: str) -> List[Dict[str, Any]]:
        """
        Locate pipe-delimited tables and parse each into a structured record.

        A candidate starts at a line containing at least two '|' characters;
        consecutive pipe-containing lines are grouped into one table.
        """
        tables: List[Dict[str, Any]] = []
        lines = content.split('\n')
        total = len(lines)
        index = 0

        while index < total:
            if lines[index].strip().count('|') < 2:
                index += 1
                continue

            # Collect the run of consecutive pipe-containing lines.
            first_row = index
            rows = []
            while index < total and '|' in lines[index]:
                rows.append(lines[index].strip())
                index += 1

            # A table needs at least a header row and a separator row.
            if len(rows) >= 2:
                parsed = self._parse_table(rows, first_row + 1)
                if parsed:
                    tables.append(parsed)

        return tables
    
    def _parse_table(self, table_lines: List[str], start_line: int) -> Optional[Dict[str, Any]]:
        """
        Parse collected pipe-delimited lines into headers, data and alignments.

        Args:
            table_lines: Stripped table rows (header, separator, data...).
            start_line: 1-based line number of the header row.

        Returns:
            Optional[Dict[str, Any]]: table record, or None when the lines do
            not form a valid table (e.g. missing separator row).
        """
        try:
            def split_row(line: str) -> List[str]:
                # Strip outer pipes, then split on the inner ones.
                return [cell.strip() for cell in line.strip('|').split('|')]

            rows = [split_row(line) for line in table_lines]
            if len(rows) < 2:
                return None

            # Row 2 must be a separator like |---|:---:|---:|.
            separator = [cell.strip() for cell in rows[1]]
            if any(not re.match(r'^:?-+:?$', cell) for cell in separator):
                return None

            def alignment(cell: str) -> str:
                if cell.startswith(':') and cell.endswith(':'):
                    return 'center'
                if cell.endswith(':'):
                    return 'right'
                return 'left'

            headers = rows[0]
            data_rows = rows[2:]
            return {
                'start_line': start_line,
                'end_line': start_line + len(table_lines) - 1,
                'headers': headers,
                'data': data_rows,
                'alignments': [alignment(cell) for cell in separator],
                'row_count': len(data_rows),
                'column_count': len(headers)
            }

        except Exception:
            # Best-effort parsing: malformed input yields None.
            return None
    
    def _extract_links(self, content: str) -> List[Dict[str, Any]]:
        """
        Extract links: inline [text](url), reference [text][ref] and
        automatic <http://...> links.

        Image syntax (![alt](src)) is excluded via a negative lookbehind so
        images are only reported by _extract_images. Inline links may carry
        an optional quoted title, split out the same way _extract_images does.
        """
        links = []

        # Inline links [text](url "optional title"); (?<!!) skips images.
        link_pattern = r'(?<!!)\[([^\]]+)\]\(([^\)]+)\)'
        for match in re.finditer(link_pattern, content):
            text = match.group(1)
            url = match.group(2)

            # Split an optional quoted title from the URL.
            title = ''
            if ' "' in url:
                url, _, remainder = url.partition(' "')
                title = remainder.rstrip('"')

            links.append({
                'type': 'markdown',
                'text': text,
                'url': url,
                'title': title,
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        # Reference links [text][ref]; (?<!!) skips reference images.
        ref_link_pattern = r'(?<!!)\[([^\]]+)\]\[([^\]]+)\]'
        for match in re.finditer(ref_link_pattern, content):
            links.append({
                'type': 'reference',
                'text': match.group(1),
                'reference': match.group(2),
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        # Automatic links <http://...> / <https://...>.
        auto_link_pattern = r'<(https?://[^>]+)>'
        for match in re.finditer(auto_link_pattern, content):
            url = match.group(1)
            links.append({
                'type': 'auto',
                'text': url,
                'url': url,
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        return links
    
    def _extract_images(self, content: str) -> List[Dict[str, Any]]:
        """
        Extract images: inline ![alt](src "title") and reference ![alt][ref].

        For inline images an optional quoted title after the source path is
        split into a separate 'title' field.
        """
        images = []

        # Inline images ![alt](src) with optional " title".
        for match in re.finditer(r'!\[([^\]]*)\]\(([^\)]+)\)', content):
            src = match.group(2)
            title = ''
            if ' "' in src:
                src, _, remainder = src.partition(' "')
                title = remainder.rstrip('"')

            images.append({
                'type': 'markdown',
                'alt_text': match.group(1),
                'src': src,
                'title': title,
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        # Reference images ![alt][ref].
        for match in re.finditer(r'!\[([^\]]*)\]\[([^\]]+)\]', content):
            images.append({
                'type': 'reference',
                'alt_text': match.group(1),
                'reference': match.group(2),
                'start_pos': match.start(),
                'end_pos': match.end()
            })

        return images
    
    def _convert_to_html(self, content: str) -> str:
        """
        Render Markdown to HTML with the shared parser instance.

        Returns an empty string on any failure so callers can fall back to
        regex-based plain-text extraction.
        """
        try:
            # reset() clears per-document parser state between conversions.
            self.md.reset()
            return self.md.convert(content)
        except Exception:
            return ''
    
    def _extract_plain_text(self, content: str) -> str:
        """
        Strip Markdown down to plain text.

        Prefers rendering to HTML and extracting its text; if rendering
        fails (returns ''), falls back to regex-based removal of Markdown
        syntax. Order of the fallback substitutions matters (code first).
        """
        html = self._convert_to_html(content)
        if html:
            return BeautifulSoup(html, 'html.parser').get_text(separator='\n', strip=True)

        # Regex fallback: peel off Markdown constructs one class at a time.
        substitutions = [
            (r'```.*?```', '', re.DOTALL),       # fenced code blocks
            (r'`[^`]+`', '', 0),                 # inline code
            (r'!?\[[^\]]*\]\([^\)]*\)', '', 0),  # inline links / images
            (r'!?\[[^\]]*\]\[[^\]]*\]', '', 0),  # reference links / images
            (r'^#{1,6}\s+', '', re.MULTILINE),   # heading markers
            (r'\*\*([^*]+)\*\*', r'\1', 0),      # bold
            (r'\*([^*]+)\*', r'\1', 0),          # italic
            (r'~~([^~]+)~~', r'\1', 0),          # strikethrough
            (r'\n\s*\n', '\n\n', 0),             # collapse extra blank lines
        ]
        text = content
        for pattern, repl, flags in substitutions:
            text = re.sub(pattern, repl, text, flags=flags)
        return text.strip()
    
    def extract_metadata_extended(self, file_content: bytes) -> Dict[str, Any]:
        """
        Extract base metadata plus structural element counts.

        Args:
            file_content: Raw bytes of the Markdown file.

        Returns:
            Dict[str, Any]: base metadata extended with counts of headings,
            links, images, fenced code blocks and list items.

        Raises:
            Exception: Wraps any underlying failure; the original exception
            is chained via ``from``.
        """
        try:
            content = self._decode_content(file_content)
            front_matter, main_content = self._extract_front_matter(content)

            # Base metadata (title, author, word/line counts, ...).
            metadata = self._extract_metadata(content, front_matter)

            metadata.update({
                'heading_count': len(re.findall(r'^#{1,6}\s+', main_content, re.MULTILINE)),
                'link_count': len(re.findall(r'\[([^\]]+)\]\(([^\)]+)\)', main_content)),
                'image_count': len(re.findall(r'!\[([^\]]*)\]\(([^\)]+)\)', main_content)),
                # Each fenced block contributes an opening and a closing ```.
                'code_block_count': len(re.findall(r'```', main_content)) // 2,
                # NOTE(review): this counts pipe-containing *rows*, not whole
                # tables; kept as-is for backward compatibility.
                'table_count': len(re.findall(r'\|.*\|', main_content)),
                'list_item_count': len(re.findall(r'^\s*[-*+]\s+', main_content, re.MULTILINE)),
                'numbered_list_count': len(re.findall(r'^\s*\d+\.\s+', main_content, re.MULTILINE))
            })

            return metadata

        except Exception as e:
            # Chain the original exception instead of discarding it.
            raise Exception(f"扩展元数据提取失败: {str(e)}") from e