import os
import re
from urllib.request import urlopen
from urllib.parse import urljoin, urlparse
from pathlib import Path

class ResourceDownloader:
    """Download external resources referenced by an HTML page and rewrite
    their URLs to local relative paths under ``resource_dir``.

    Tuned for pages (e.g. Baidu) that mix absolute URLs, relative URLs and
    CSS ``url(...)`` references. Resources that cannot (or should not) be
    downloaded keep their original URL, so the rewrite is best-effort.
    """

    def __init__(self, base_url, html_content, progress_callback=None):
        """
        Args:
            base_url: URL the HTML was fetched from; used to resolve
                relative resource links via ``urljoin``.
            html_content: raw HTML text to scan and rewrite.
            progress_callback: optional callable receiving a single status
                string after each processed resource.
        """
        self.base_url = base_url
        self.html_content = html_content
        self.resource_dir = "resources"
        os.makedirs(self.resource_dir, exist_ok=True)
        self.progress_callback = progress_callback
        self.total_resources = 0
        self.downloaded_resources = 0
        self.current_resource = ""
        # Only these extensions are downloaded; other typed URLs are skipped.
        self.valid_resource_extensions = [
            '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.ico',
            '.css', '.js', '.woff', '.woff2', '.ttf', '.eot', '.svg', '.xml'
        ]

    def process_resources(self):
        """Rewrite all external resource references in the HTML.

        Returns:
            The modified HTML string, with successfully downloaded resources
            pointing at local copies and everything else left untouched.
        """
        patterns = [
            (r'(href|src)=["\'](.*?)["\']', self._process_generic_resource),
            (r'url\(["\']?(.*?)["\']?\)', self._process_css_resource)
        ]

        # Count matches up front so progress can be reported as a percentage.
        self.total_resources = 0
        for pattern, _ in patterns:
            self.total_resources += len(re.findall(pattern, self.html_content))

        modified_html = self.html_content
        for pattern, processor in patterns:
            modified_html = re.sub(pattern, processor, modified_html)
        return modified_html

    def _process_generic_resource(self, match):
        """re.sub callback for HTML tag attributes (img, link, script, a...).

        Rewrites the matched ``href=``/``src=`` attribute to point at the
        downloaded local copy (or leaves the URL unchanged on failure).
        Note: the replacement always uses double quotes.
        """
        attr = match.group(1)
        original_url = match.group(2)
        self.current_resource = original_url
        new_url = self._download_and_replace(original_url)
        self._update_progress()
        return f'{attr}="{new_url}"'

    def _process_css_resource(self, match):
        """re.sub callback for CSS ``url(...)`` references (backgrounds, fonts)."""
        original_url = match.group(1)
        self.current_resource = original_url
        new_url = self._download_and_replace(original_url)
        self._update_progress()
        return f'url("{new_url}")'

    def _update_progress(self):
        """Advance the processed-resource counter and notify the callback."""
        self.downloaded_resources += 1
        if self.progress_callback and self.total_resources > 0:
            progress = (self.downloaded_resources / self.total_resources) * 100
            filename = self.current_resource.split('/')[-1] or "资源"
            # BUG FIX: the message previously contained the literal text
            # "(unknown)" and never used the computed filename.
            self.progress_callback(f"下载中: {filename} ({self.downloaded_resources}/{self.total_resources}, {progress:.1f}%)")

    def _download_and_replace(self, original_url):
        """Download one resource and return the local path to substitute.

        Returns the original URL unchanged for non-downloadable references
        (empty, data: URIs, fragments, javascript:) and on any failure.
        """
        if not original_url or original_url.startswith(('data:', '#', 'javascript:')):
            return original_url

        # Absolute URLs pass through; relative ones resolve against the page.
        if original_url.startswith(('http://', 'https://')):
            absolute_url = original_url
        else:
            absolute_url = urljoin(self.base_url, original_url)

        try:
            local_path = self._download_resource(absolute_url)
            if local_path:
                # Join with '/' explicitly: this is a URL embedded in HTML/CSS,
                # so os.path.join's backslash on Windows would be invalid.
                return f"{self.resource_dir}/{local_path}"
            else:
                return original_url
        except Exception as e:
            print(f"资源下载失败: {str(e)}")
            return original_url

    def _download_resource(self, url):
        """Download a single resource into ``resource_dir``.

        Filters out URLs whose extension is not in the allow-list, sanitizes
        the filename, and de-duplicates with a numeric suffix.

        Returns:
            The saved file's name (not full path), or None on skip/failure.
        """
        try:
            # Skip URLs with a known extension that is not a page resource.
            parsed = urlparse(url)
            file_ext = os.path.splitext(parsed.path)[1].lower()
            if file_ext and file_ext not in self.valid_resource_extensions:
                print(f"跳过非资源文件: {url}")
                return None

            # Derive a safe filename; extensionless paths get ".html".
            filename = os.path.basename(parsed.path) or "untitled"
            if '.' not in filename:
                filename += ".html"
            filename = self._sanitize_filename(filename)

            # Avoid clobbering an existing file: append _1, _2, ... to the stem.
            dest_path = Path(self.resource_dir) / filename
            counter = 1
            while dest_path.exists():
                stem = dest_path.stem
                new_name = f"{stem}_{counter}{dest_path.suffix}"
                dest_path = dest_path.with_name(new_name)
                counter += 1

            # Fetch and write the resource; a timeout prevents one stalled
            # connection from hanging the whole rewrite indefinitely.
            with urlopen(url, timeout=15) as response:
                with open(dest_path, 'wb') as f:
                    f.write(response.read())

            return dest_path.name
        except Exception as e:
            print(f"下载 {url} 失败: {str(e)}")
            return None

    def _sanitize_filename(self, name):
        """Replace characters illegal in filenames with underscores."""
        return re.sub(r'[\\/*?:"<>|]', "_", name).strip()
