#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
HTML文件处理模块

提供对HTML文件的读取、解析、修改、转换等功能
"""

import os
import re
import tempfile
from pathlib import Path
from typing import Union, List, Dict, Any, Optional, Tuple, Iterator, Set

# 导入异常处理装饰器
from ..exception.exception_handler import exception_handler
from ..exception.exceptions import FileException

# 导入日志模块
from ..logger.logger import Logger

# Module-level logger shared by every method in this file
logger = Logger("html_handler").get_logger()


class HTMLHandler:
    """
    HTML file handler.

    Provides utilities for reading, parsing, modifying and converting
    HTML files (link/image/table/text extraction, PDF/Markdown export,
    minification, validation, and in-place edits).

    Note: this class requires the beautifulsoup4 and lxml packages:
    pip install beautifulsoup4 lxml
    """
    
    def __init__(self):
        """Initialize the HTML handler.

        Lazily imports the parsing dependencies and keeps the
        ``BeautifulSoup`` class on the instance for later use.

        Raises:
            ImportError: If beautifulsoup4 or lxml is not installed.
        """
        try:
            from bs4 import BeautifulSoup
            import lxml  # noqa: F401 - only imported to confirm the lxml backend exists
        except ImportError as err:
            logger.error(f"初始化HTML处理器失败: {err}")
            logger.error("请安装必要的依赖: pip install beautifulsoup4 lxml")
            raise ImportError("请安装必要的依赖: pip install beautifulsoup4 lxml") from err
        self.BeautifulSoup = BeautifulSoup
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def read_html(self, html_path: Union[str, Path], encoding: str = 'utf-8') -> 'BeautifulSoup':
        """Read an HTML file and return it as a BeautifulSoup object.

        Falls back to latin-1 when the requested encoding fails, since
        latin-1 can decode any byte sequence.

        Args:
            html_path: Path to the HTML file.
            encoding: Encoding to try first, defaults to 'utf-8'.

        Returns:
            A BeautifulSoup object parsed with the lxml parser.

        Raises:
            FileException: If the file does not exist or cannot be read.

        Examples:
            >>> handler = HTMLHandler()
            >>> soup = handler.read_html('example.html')
            >>> title = soup.title.string
        """
        html_path = Path(html_path)

        if not html_path.exists():
            raise FileException(f"HTML文件不存在: {html_path}")

        try:
            with open(html_path, 'r', encoding=encoding) as file:
                html_content = file.read()

            soup = self.BeautifulSoup(html_content, 'lxml')

            logger.info(f"读取HTML文件成功: {html_path}")
            return soup
        except UnicodeDecodeError:
            # latin-1 maps every byte to a code point, so this retry cannot
            # raise UnicodeDecodeError itself.
            try:
                with open(html_path, 'r', encoding='latin-1') as file:
                    html_content = file.read()

                soup = self.BeautifulSoup(html_content, 'lxml')
                logger.warning(f"使用latin-1编码读取HTML文件: {html_path}")
                return soup
            except Exception as e:
                # Chain the original exception so the root cause survives.
                raise FileException(f"读取HTML文件失败: {e}") from e
        except Exception as e:
            raise FileException(f"读取HTML文件失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def write_html(self, soup: 'BeautifulSoup', output_path: Union[str, Path], 
                 encoding: str = 'utf-8', pretty: bool = True) -> None:
        """Serialize a BeautifulSoup object to an HTML file.

        Args:
            soup: The BeautifulSoup object to serialize.
            output_path: Destination HTML file path.
            encoding: Output encoding, defaults to 'utf-8'.
            pretty: Whether to emit indented markup (default True).

        Raises:
            FileException: If serialization or writing fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> soup = handler.read_html('example.html')
            >>> soup.title.string = '新标题'
            >>> handler.write_html(soup, 'modified.html')
        """
        output_path = Path(output_path)

        # Create the destination directory when needed.
        output_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            # prettify() returns indented markup, encode() keeps it compact;
            # both return bytes when an encoding is supplied.
            serialize = soup.prettify if pretty else soup.encode
            html_bytes = serialize(encoding=encoding, formatter='html')

            with open(output_path, 'wb') as file:
                file.write(html_bytes)

            logger.info(f"写入HTML文件成功: {output_path}")
        except Exception as e:
            raise FileException(f"写入HTML文件失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def extract_links(self, html_path: Union[str, Path], absolute_url: Optional[str] = None) -> List[Dict[str, str]]:
        """Extract all anchor links from an HTML file.

        Args:
            html_path: Path to the HTML file.
            absolute_url: Base URL used to resolve relative links to
                absolute ones. If None, links are returned as written.

        Returns:
            A list of dicts with keys 'url', 'text' and 'title'.

        Raises:
            FileException: If the file does not exist or extraction fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> links = handler.extract_links('example.html')
            >>> links = handler.extract_links('example.html', 'https://example.com')
        """
        from urllib.parse import urljoin

        try:
            soup = self.read_html(html_path)

            links = []
            for a_tag in soup.find_all('a', href=True):
                href = a_tag['href']

                # urljoin resolves root-relative, path-relative and '../'
                # links per RFC 3986 and leaves absolute URLs (http://,
                # https://, //, mailto:, ...) untouched — the previous
                # manual concatenation mishandled '../' segments, bases
                # that already contained a path, and non-http schemes.
                if absolute_url:
                    href = urljoin(absolute_url, href)

                links.append({
                    'url': href,
                    'text': a_tag.get_text(strip=True),
                    'title': a_tag.get('title', ''),
                })

            logger.info(f"提取HTML链接成功: {html_path} (共 {len(links)} 个链接)")
            return links
        except Exception as e:
            raise FileException(f"提取HTML链接失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def extract_images(self, html_path: Union[str, Path], absolute_url: Optional[str] = None) -> List[Dict[str, str]]:
        """Extract all <img> elements from an HTML file.

        Args:
            html_path: Path to the HTML file.
            absolute_url: Base URL used to resolve relative image sources
                to absolute URLs. If None, sources are returned as written.

        Returns:
            A list of dicts with keys 'src', 'alt', 'title', 'width'
            and 'height'.

        Raises:
            FileException: If the file does not exist or extraction fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> images = handler.extract_images('example.html')
            >>> images = handler.extract_images('example.html', 'https://example.com')
        """
        from urllib.parse import urljoin

        try:
            soup = self.read_html(html_path)

            images = []
            for img_tag in soup.find_all('img'):
                src = img_tag.get('src', '')

                # urljoin resolves root-relative, path-relative and '../'
                # sources correctly and leaves absolute URLs and data: URIs
                # untouched — the previous manual concatenation mishandled
                # '../' segments and bases that already contained a path.
                if src and absolute_url:
                    src = urljoin(absolute_url, src)

                images.append({
                    'src': src,
                    'alt': img_tag.get('alt', ''),
                    'title': img_tag.get('title', ''),
                    'width': img_tag.get('width', ''),
                    'height': img_tag.get('height', ''),
                })

            logger.info(f"提取HTML图像成功: {html_path} (共 {len(images)} 个图像)")
            return images
        except Exception as e:
            raise FileException(f"提取HTML图像失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def extract_tables(self, html_path: Union[str, Path]) -> List[List[List[str]]]:
        """Extract every <table> from an HTML file as nested lists.

        Args:
            html_path: Path to the HTML file.

        Returns:
            A list of tables; each table is a list of rows, each row a
            list of cell strings. All <th> cells of a table are collected
            into a single leading header row.

        Raises:
            FileException: If the file does not exist or extraction fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> tables = handler.extract_tables('example.html')
            >>> if tables:
            ...     first_table = tables[0]
            ...     for row in first_table:
            ...         print(row)
        """
        try:
            soup = self.read_html(html_path)

            tables = []
            for table in soup.find_all('table'):
                # Every <th> in the table forms one combined header row.
                headers = [th.get_text(strip=True) for th in table.find_all('th')]
                table_data = [headers] if headers else []

                for tr in table.find_all('tr'):
                    has_th = bool(tr.find_all('th'))
                    has_td = bool(tr.find_all('td'))
                    # Skip pure header rows; rows mixing <th> and <td> are
                    # kept once a header row has been recorded.
                    if not has_th or (headers and has_td):
                        row = [td.get_text(strip=True) for td in tr.find_all('td')]
                        if row:  # drop empty rows
                            table_data.append(row)

                if table_data:  # drop empty tables
                    tables.append(table_data)

            logger.info(f"提取HTML表格成功: {html_path} (共 {len(tables)} 个表格)")
            return tables
        except Exception as e:
            raise FileException(f"提取HTML表格失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def extract_text(self, html_path: Union[str, Path], include_links: bool = False) -> str:
        """Extract the visible text content from an HTML file.

        Args:
            html_path: Path to the HTML file.
            include_links: When True, append a section listing each link's
                text and URL. Defaults to False.

        Returns:
            The extracted text, newline-separated.

        Raises:
            FileException: If the file does not exist or extraction fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> text = handler.extract_text('example.html')
            >>> text = handler.extract_text('example.html', include_links=True)
        """
        try:
            soup = self.read_html(html_path)

            # Strip non-content elements before extracting text.
            for element in soup(["script", "style"]):
                element.extract()

            text = soup.get_text(separator='\n', strip=True)

            if include_links:
                link_lines = [
                    f"{a.get_text(strip=True)} [{a['href']}]"
                    for a in soup.find_all('a', href=True)
                    if a.get_text(strip=True) and a['href']
                ]
                if link_lines:
                    text += "\n\n链接:\n" + "\n".join(link_lines)

            logger.info(f"提取HTML文本成功: {html_path}")
            return text
        except Exception as e:
            raise FileException(f"提取HTML文本失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def convert_to_pdf(self, html_path: Union[str, Path], output_path: Union[str, Path]) -> None:
        """Render an HTML file to PDF via pdfkit/wkhtmltopdf.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the PDF file to create.

        Raises:
            FileException: If the file does not exist or conversion fails
                (including when the pdfkit dependency is missing).

        Examples:
            >>> handler = HTMLHandler()
            >>> handler.convert_to_pdf('example.html', 'example.pdf')
        """
        html_path, output_path = Path(html_path), Path(output_path)

        if not html_path.exists():
            raise FileException(f"HTML文件不存在: {html_path}")

        # Make sure the target directory exists before rendering.
        output_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            try:
                import pdfkit
            except ImportError:
                raise ImportError("请安装必要的依赖: pip install pdfkit wkhtmltopdf")

            # pdfkit expects plain string paths.
            pdfkit.from_file(os.fspath(html_path), os.fspath(output_path))

            logger.info(f"HTML转换为PDF成功: {html_path} -> {output_path}")
        except Exception as e:
            raise FileException(f"HTML转换为PDF失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def convert_to_markdown(self, html_path: Union[str, Path], output_path: Union[str, Path]) -> None:
        """Convert an HTML file to Markdown using html2text.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the Markdown file to create.

        Raises:
            FileException: If the file does not exist or conversion fails
                (including when the html2text dependency is missing).

        Examples:
            >>> handler = HTMLHandler()
            >>> handler.convert_to_markdown('example.html', 'example.md')
        """
        html_path = Path(html_path)
        output_path = Path(output_path)

        if not html_path.exists():
            raise FileException(f"HTML文件不存在: {html_path}")

        output_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            try:
                import html2text
            except ImportError:
                raise ImportError("请安装必要的依赖: pip install html2text")

            # Read the source, falling back to latin-1 (which accepts any
            # byte sequence) when UTF-8 decoding fails. This replaces the
            # previous duplicated fallback conversion path.
            try:
                html_content = html_path.read_text(encoding='utf-8')
            except UnicodeDecodeError:
                html_content = html_path.read_text(encoding='latin-1')
                logger.warning(f"使用latin-1编码读取HTML文件: {html_path}")

            h = html2text.HTML2Text()
            h.ignore_links = False
            h.ignore_images = False
            h.ignore_tables = False
            h.body_width = 0  # disable hard wrapping

            markdown_content = h.handle(html_content)

            output_path.write_text(markdown_content, encoding='utf-8')

            logger.info(f"HTML转换为Markdown成功: {html_path} -> {output_path}")
        except Exception as e:
            raise FileException(f"HTML转换为Markdown失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def minify_html(self, html_path: Union[str, Path], output_path: Union[str, Path]) -> None:
        """Minify an HTML file using htmlmin.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the minified HTML file to create.

        Raises:
            FileException: If the file does not exist or minification
                fails (including when the htmlmin dependency is missing).

        Examples:
            >>> handler = HTMLHandler()
            >>> handler.minify_html('example.html', 'example.min.html')
        """
        html_path = Path(html_path)
        output_path = Path(output_path)

        if not html_path.exists():
            raise FileException(f"HTML文件不存在: {html_path}")

        output_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            try:
                import htmlmin
            except ImportError:
                raise ImportError("请安装必要的依赖: pip install htmlmin")

            # Read with UTF-8, falling back to latin-1 (which accepts any
            # byte sequence); this replaces the previous duplicated path.
            try:
                source_encoding = 'utf-8'
                html_content = html_path.read_text(encoding=source_encoding)
            except UnicodeDecodeError:
                source_encoding = 'latin-1'
                html_content = html_path.read_text(encoding=source_encoding)
                logger.warning(f"使用latin-1编码读取HTML文件: {html_path}")

            minified_html = htmlmin.minify(html_content,
                                           remove_comments=True,
                                           remove_empty_space=True,
                                           remove_all_empty_space=False,
                                           reduce_boolean_attributes=True)

            # Write with the same encoding the source was read with (the
            # original wrote latin-1 output for latin-1 input).
            output_path.write_text(minified_html, encoding=source_encoding)

            # Guard against division by zero for an empty input file,
            # which previously crashed the ratio computation.
            original_size = len(html_content)
            minified_size = len(minified_html)
            compression_ratio = (1 - minified_size / original_size) * 100 if original_size else 0.0

            logger.info(f"压缩HTML文件成功: {html_path} -> {output_path} (压缩率: {compression_ratio:.2f}%)")
        except Exception as e:
            raise FileException(f"压缩HTML文件失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def validate_html(self, html_path: Union[str, Path]) -> Dict[str, Any]:
        """Validate tag nesting and closure in an HTML file.

        Uses a stack-based parser built on the stdlib html.parser: start
        tags are pushed, end tags must match the top of the stack, and
        void elements are ignored. This checks tag balance only, not full
        HTML conformance.

        Args:
            html_path: Path to the HTML file.

        Returns:
            A dict with keys 'valid' (bool), 'errors' (list of message
            strings) and 'error_count' (int).

        Raises:
            FileException: If the file does not exist or validation fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> result = handler.validate_html('example.html')
            >>> if result['valid']:
            ...     print("HTML文件有效")
            ... else:
            ...     print(f"HTML文件无效，错误数: {len(result['errors'])}")
        """
        html_path = Path(html_path)

        if not html_path.exists():
            raise FileException(f"HTML文件不存在: {html_path}")

        try:
            # html.parser is always available in Python 3; the previous
            # try/except around this import was dead code.
            from html.parser import HTMLParser

            # HTML void elements never take a closing tag; shared by both
            # handlers instead of being duplicated inline.
            void_tags = frozenset([
                'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input',
                'link', 'meta', 'param', 'source', 'track', 'wbr',
            ])

            class HTMLValidator(HTMLParser):
                """Tracks open tags on a stack and records mismatches."""

                def __init__(self):
                    super().__init__()
                    self.errors = []
                    self.tags_stack = []

                def handle_starttag(self, tag, attrs):
                    if tag not in void_tags:
                        self.tags_stack.append(tag)

                def handle_endtag(self, tag):
                    if tag in void_tags:
                        return
                    if self.tags_stack and self.tags_stack[-1] == tag:
                        self.tags_stack.pop()
                    else:
                        expected = self.tags_stack[-1] if self.tags_stack else 'None'
                        self.errors.append(f"标签不匹配: 找到结束标签 </{tag}>, 但期望的是 </{expected}>")

                def error(self, message):
                    self.errors.append(message)

            # Read with UTF-8, falling back to latin-1 (which accepts any
            # byte sequence); this replaces the previous duplicated path.
            try:
                html_content = html_path.read_text(encoding='utf-8')
            except UnicodeDecodeError:
                html_content = html_path.read_text(encoding='latin-1')
                logger.warning(f"使用latin-1编码读取HTML文件: {html_path}")

            validator = HTMLValidator()
            validator.feed(html_content)
            validator.close()  # flush any buffered data through the parser

            # Any tags left on the stack were never closed.
            for tag in reversed(validator.tags_stack):
                validator.errors.append(f"未闭合的标签: <{tag}>")

            result = {
                'valid': not validator.errors,
                'errors': validator.errors,
                'error_count': len(validator.errors),
            }

            logger.info(f"验证HTML文件完成: {html_path} (有效: {result['valid']}, 错误数: {result['error_count']})")
            return result
        except Exception as e:
            raise FileException(f"验证HTML文件失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def replace_links(self, html_path: Union[str, Path], output_path: Union[str, Path], 
                    replacements: Dict[str, str]) -> None:
        """Replace link substrings in an HTML file and save the result.

        Applies every replacement to the URL-bearing attributes of <a>,
        <img>, <link> and <script> elements.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the modified HTML file to create.
            replacements: Mapping of substring to its replacement.

        Raises:
            FileException: If the file does not exist or replacement fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> replacements = {
            ...     'http://old-domain.com': 'http://new-domain.com',
            ...     'images/old.jpg': 'images/new.jpg'
            ... }
            >>> handler.replace_links('example.html', 'modified.html', replacements)
        """
        try:
            soup = self.read_html(html_path)

            replace_count = 0

            # Each (element, attribute) pair that can carry a URL; this
            # replaces four copy-pasted loops.
            url_attributes = (('a', 'href'), ('img', 'src'),
                              ('link', 'href'), ('script', 'src'))

            for tag_name, attr in url_attributes:
                for tag in soup.find_all(tag_name, **{attr: True}):
                    value = tag[attr]
                    for old_link, new_link in replacements.items():
                        if old_link in value:
                            # Keep working on the running value so several
                            # replacements can stack on one attribute — the
                            # previous implementation reused the stale
                            # original string, so a later match silently
                            # discarded earlier substitutions.
                            value = value.replace(old_link, new_link)
                            replace_count += 1
                    tag[attr] = value

            self.write_html(soup, output_path)

            logger.info(f"替换HTML链接成功: {html_path} -> {output_path} (替换了 {replace_count} 个链接)")
        except Exception as e:
            raise FileException(f"替换HTML链接失败: {e}") from e
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def add_meta_tags(self, html_path: Union[str, Path], output_path: Union[str, Path], 
                    meta_tags: List[Dict[str, str]]) -> None:
        """Append <meta> tags to an HTML file's <head> and save the result.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the modified HTML file to create.
            meta_tags: List of attribute dicts, one per <meta> tag.

        Raises:
            FileException: If the file does not exist or the update fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> meta_tags = [
            ...     {'name': 'description', 'content': '网站描述'},
            ...     {'name': 'keywords', 'content': '关键词1,关键词2'},
            ...     {'property': 'og:title', 'content': '页面标题'}
            ... ]
            >>> handler.add_meta_tags('example.html', 'modified.html', meta_tags)
        """
        try:
            soup = self.read_html(html_path)

            # Locate <head>, creating it (and <html> when necessary).
            head = soup.head
            if not head:
                head = soup.new_tag('head')
                if soup.html:
                    soup.html.insert(0, head)
                else:
                    root = soup.new_tag('html')
                    root.append(head)
                    soup.append(root)

            # Build one <meta> element per attribute dict.
            for attributes in meta_tags:
                meta = soup.new_tag('meta')
                for name, value in attributes.items():
                    meta[name] = value
                head.append(meta)

            self.write_html(soup, output_path)

            logger.info(f"添加HTML meta标签成功: {html_path} -> {output_path} (添加了 {len(meta_tags)} 个标签)")
        except Exception as e:
            raise FileException(f"添加HTML meta标签失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def add_css(self, html_path: Union[str, Path], output_path: Union[str, Path], 
              css_content: str, inline: bool = True) -> None:
        """Add CSS to an HTML file's <head> and save the result.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the modified HTML file to create.
            css_content: CSS rules (when inline) or a stylesheet URL.
            inline: True embeds a <style> tag; False adds a <link> tag
                pointing at ``css_content``. Defaults to True.

        Raises:
            FileException: If the file does not exist or the update fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> css = 'body { font-family: Arial; } .header { color: blue; }'
            >>> handler.add_css('example.html', 'styled.html', css)
            >>> handler.add_css('example.html', 'styled.html', 'styles.css', inline=False)
        """
        try:
            soup = self.read_html(html_path)

            # Locate <head>, creating it (and <html> when necessary).
            head = soup.head
            if not head:
                head = soup.new_tag('head')
                if soup.html:
                    soup.html.insert(0, head)
                else:
                    root = soup.new_tag('html')
                    root.append(head)
                    soup.append(root)

            if inline:
                # Embedded stylesheet via a <style> element.
                style = soup.new_tag('style')
                style.string = css_content
                head.append(style)
            else:
                # External stylesheet via a <link> element.
                link = soup.new_tag('link')
                link['rel'] = 'stylesheet'
                link['href'] = css_content
                head.append(link)

            self.write_html(soup, output_path)

            logger.info(f"添加HTML CSS样式成功: {html_path} -> {output_path}")
        except Exception as e:
            raise FileException(f"添加HTML CSS样式失败: {e}")
    
    @exception_handler(exception_type=Exception, reraise_as=FileException)
    def add_javascript(self, html_path: Union[str, Path], output_path: Union[str, Path], 
                     js_content: str, inline: bool = True) -> None:
        """Add a <script> to the end of an HTML file's <body> and save it.

        Args:
            html_path: Path to the source HTML file.
            output_path: Path of the modified HTML file to create.
            js_content: JavaScript code (when inline) or a script URL.
            inline: True embeds the code in the <script> tag; False sets
                its ``src`` attribute to ``js_content``. Defaults to True.

        Raises:
            FileException: If the file does not exist or the update fails.

        Examples:
            >>> handler = HTMLHandler()
            >>> js = 'function sayHello() { alert("Hello!"); }'
            >>> handler.add_javascript('example.html', 'scripted.html', js)
            >>> handler.add_javascript('example.html', 'scripted.html', 'script.js', inline=False)
        """
        try:
            soup = self.read_html(html_path)

            # Locate <body>, creating it (and <html> when necessary).
            body = soup.body
            if not body:
                body = soup.new_tag('body')
                if soup.html:
                    soup.html.append(body)
                else:
                    root = soup.new_tag('html')
                    root.append(body)
                    soup.append(root)

            script = soup.new_tag('script')
            if inline:
                script.string = js_content  # embedded code
            else:
                script['src'] = js_content  # external script reference

            # Scripts go at the end of <body> so the DOM loads first.
            body.append(script)

            self.write_html(soup, output_path)

            logger.info(f"添加HTML JavaScript脚本成功: {html_path} -> {output_path}")
        except Exception as e:
            raise FileException(f"添加HTML JavaScript脚本失败: {e}")