import os
import logging
from typing import List, Dict
from .deep_crawler import DeepCrawler
from .pdf_converter import PDFConverter

class ArticleProcessor:
    """
    Article processor that ties together the crawler and the PDF converter.

    Crawls a URL (and linked pages up to the crawler's max depth), saves the
    fetched pages as HTML files, then converts those files to PDFs.
    """

    def __init__(self, html_output_dir: str = "data/articles", pdf_output_dir: str = "data/pdfs"):
        """
        Initialize the article processor.

        Args:
            html_output_dir: Directory where crawled HTML files are written.
            pdf_output_dir: Directory where converted PDF files are written.
        """
        self.html_output_dir = html_output_dir
        self.pdf_output_dir = pdf_output_dir
        # Depth 2: the page itself plus one level of linked pages.
        self.crawler = DeepCrawler(max_depth=2)
        self.pdf_converter = PDFConverter()
        self.logger = logging.getLogger(__name__)

    def process_url(self, url: str) -> Dict[str, List[str]]:
        """
        Process a single URL: crawl, save HTML, convert to PDF.

        Args:
            url: Target URL to crawl.

        Returns:
            Dict[str, List[str]]: Mapping with keys "html_files" and
            "pdf_files" listing the paths of the files produced. Both lists
            are empty if any step fails (errors are logged, not raised).
        """
        try:
            # Crawl the article (and linked pages up to max_depth).
            self.logger.info("Starting to crawl URL: %s", url)
            html_contents = self.crawler.crawl(url)

            # Persist the crawled pages as HTML files.
            html_files = self.crawler.save_html_content(html_contents, self.html_output_dir)

            # Convert the saved HTML files to PDFs.
            pdf_files = self.pdf_converter.batch_convert(html_files, self.pdf_output_dir)

            return {
                "html_files": html_files,
                "pdf_files": pdf_files
            }

        except Exception as e:
            # Best-effort boundary: log with traceback and return empty
            # results so batch processing of other URLs can continue.
            self.logger.exception("Error processing URL %s: %s", url, e)
            return {
                "html_files": [],
                "pdf_files": []
            }

    def process_urls(self, urls: List[str]) -> Dict[str, List[str]]:
        """
        Process multiple URLs, aggregating the per-URL results.

        Args:
            urls: List of target URLs.

        Returns:
            Dict[str, List[str]]: Mapping with keys "html_files" and
            "pdf_files" containing the combined file paths from all URLs,
            in input order. Failed URLs contribute nothing.
        """
        all_html_files: List[str] = []
        all_pdf_files: List[str] = []

        for url in urls:
            result = self.process_url(url)
            all_html_files.extend(result["html_files"])
            all_pdf_files.extend(result["pdf_files"])

        return {
            "html_files": all_html_files,
            "pdf_files": all_pdf_files
        }