# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import os
import re
import urllib.parse
import shutil

# #################### User configuration ####################
WEBPAGE_CONFIG = {
    'output_dir': 'saved_pages',    # directory where saved pages are written
    'filename': None,               # None -> derive the file name from the page <title>
    'beautify_html': False,         # pretty-print the HTML structure (NOTE(review): not referenced in this file — confirm usage)
    'save_resources': True,         # download linked resources (CSS/JS/media) alongside the page
    'resource_types': ['.css', '.js', '.png', '.jpg', '.jpeg', '.gif', '.svg', '.ico'],  # resource extensions to save (NOTE(review): not referenced in this file — confirm usage)
}

def download_resource(url, save_dir, base_url):
    """Download one resource file into ``save_dir/resources/<host>/<path>``.

    Args:
        url: Resource URL; relative URLs are resolved against ``base_url``.
        save_dir: Root directory of the saved page.
        base_url: Page URL used to resolve relative resource paths.

    Returns:
        The local path relative to ``save_dir`` (forward slashes) on success,
        otherwise the (resolved) original URL so the page still references
        something usable.
    """
    try:
        # Resolve relative paths against the page URL.
        if not url.startswith(('http://', 'https://')):
            url = urllib.parse.urljoin(base_url, url)

        # Mirror the remote layout locally: resources/<host>/<path>.
        parsed_url = urllib.parse.urlparse(url)
        rel_path = parsed_url.path.lstrip('/')
        # A path that is empty or ends in '/' has no file name; fall back to
        # a default so open() below doesn't target a directory.
        if not rel_path or rel_path.endswith('/'):
            rel_path += 'index'
        local_path = os.path.join(save_dir, 'resources', parsed_url.netloc, rel_path)
        os.makedirs(os.path.dirname(local_path), exist_ok=True)

        # Stream the download; timeout prevents hanging on a stalled server,
        # and the with-block releases the connection back to the pool.
        with requests.get(url, stream=True, timeout=30) as response:
            if response.status_code == 200:
                with open(local_path, 'wb') as f:
                    # Decode gzip/deflate transfer encoding while copying.
                    response.raw.decode_content = True
                    shutil.copyfileobj(response.raw, f)
                # Return a relative path with forward slashes for use in HTML.
                return os.path.relpath(local_path, save_dir).replace('\\', '/')
    except Exception as e:
        print(f"资源下载失败 {url}: {str(e)}")
    return url

def save_webpage_html(url):
    """Fetch a webpage, localize its resources, and save it as an HTML file.

    Args:
        url: Absolute URL of the page to save.

    Returns:
        Path of the saved HTML file (inside ``WEBPAGE_CONFIG['output_dir']``).

    Raises:
        requests.RequestException: if the page itself cannot be fetched.
    """
    # Create the output directory.
    save_dir = os.path.join(WEBPAGE_CONFIG['output_dir'])
    os.makedirs(save_dir, exist_ok=True)

    # Fetch the page; a timeout keeps the program from hanging forever.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    }
    response = requests.get(url, headers=headers, timeout=30)
    # apparent_encoding sniffs the charset from the body, which is more
    # reliable than a missing/incorrect Content-Type header.
    response.encoding = response.apparent_encoding

    soup = BeautifulSoup(response.text, 'html.parser')

    if WEBPAGE_CONFIG['save_resources']:
        # Rewrite relative <a>/<link> hrefs to absolute so navigation from the
        # saved copy still works; external, anchor and mailto links stay as-is.
        for link in soup.find_all(['a', 'link']):
            href = link.get('href')
            if not href:
                continue
            if href.startswith(('http://', 'https://', '#', 'mailto:')):
                continue
            link['href'] = urllib.parse.urljoin(url, href)

        # Download stylesheets and point the page at the local copies.
        for css in soup.find_all('link', rel='stylesheet'):
            if css.get('href'):
                css['href'] = download_resource(css['href'], save_dir, url)

        # Download external scripts.
        for js in soup.find_all('script', src=True):
            js['src'] = download_resource(js['src'], save_dir, url)

        # Download images and other media. NOTE: this single pass replaces the
        # original extra <img> loop, which re-fed already-localized relative
        # paths back into download_resource and corrupted them.
        for media in soup.find_all(['img', 'video', 'audio', 'source']):
            src = media.get('src')
            if src:
                media['src'] = download_resource(src, save_dir, url)
            # srcset holds comma-separated "URL [descriptor]" entries.
            srcset = media.get('srcset')
            if srcset:
                new_srcset = []
                for src_item in srcset.split(','):
                    src_parts = src_item.strip().split()
                    if src_parts:
                        new_path = download_resource(src_parts[0], save_dir, url)
                        new_srcset.append(f"{new_path} {' '.join(src_parts[1:])}")
                media['srcset'] = ', '.join(new_srcset)

    # Derive the file name from the page title unless one was configured.
    if not WEBPAGE_CONFIG['filename']:
        # soup.title.string is None for an empty <title>, so guard both levels
        # before calling .strip() on it.
        title = soup.title.string if (soup.title and soup.title.string) else 'untitled'
        # Strip characters that are invalid in file names, cap the length.
        filename = re.sub(r'[\\/*?:"<>|]', '_', title.strip())[:50] + '.html'
    else:
        filename = WEBPAGE_CONFIG['filename']

    # Save the HTML file with a provenance comment at the top.
    save_path = os.path.join(save_dir, filename)
    with open(save_path, 'w', encoding='utf-8') as f:
        f.write(f'<!-- Saved from {url} -->\n')
        f.write(str(soup))

    print(f"网页已保存至: {os.path.abspath(save_path)}")
    return save_path

def get_url_input():
    """Prompt repeatedly for a webpage URL.

    Returns:
        A URL starting with http:// or https://, or None if the user quits.
    """
    while True:
        entered = input("请输入要保存的网页地址 (按 q 退出): ").strip()
        if entered.lower() == 'q':
            return None
        if entered.startswith('http://') or entered.startswith('https://'):
            return entered
        print("错误：请输入有效的网页地址（以 http:// 或 https:// 开头）")

def cleanup_resources(save_dir):
    """Best-effort removal of the downloaded ``resources`` folder.

    Args:
        save_dir: Directory containing the ``resources`` subfolder; a missing
            subfolder is a no-op.
    """
    resources_dir = os.path.join(save_dir, 'resources')
    if not os.path.exists(resources_dir):
        return

    try:
        # Loosen permissions first so read-only entries don't block removal.
        # Every chmod is guarded (the original only guarded files, so a single
        # failing directory chmod aborted the whole cleanup), and the narrow
        # OSError avoids swallowing KeyboardInterrupt/SystemExit.
        for root, dirnames, filenames in os.walk(resources_dir):
            for entry in dirnames + filenames:
                try:
                    os.chmod(os.path.join(root, entry), 0o777)
                except OSError:
                    pass  # rmtree(ignore_errors=True) copes with leftovers

        # Attempt the removal; errors are ignored and checked below instead.
        shutil.rmtree(resources_dir, ignore_errors=True)

        # Verify and report the outcome.
        if os.path.exists(resources_dir):
            print("部分文件可能未能完全清理，请手动删除资源文件夹")
        else:
            print("资源文件已清理完成")

    except Exception as e:
        print(f"清理过程中出现错误: {str(e)}")
        print("建议手动删除 saved_pages/resources 文件夹")

if __name__ == "__main__":
    # Interactive driver: keep saving pages until the user quits.
    while True:
        url = get_url_input()
        if url is None:
            print("程序已退出")
            break

        try:
            saved_path = save_webpage_html(url)

            # Optionally remove the downloaded resource files.
            if input("\n是否清理下载的资源文件？(y/n): ").strip().lower() == 'y':
                cleanup_resources(WEBPAGE_CONFIG['output_dir'])

            # Ask whether to process another page.
            if input("\n是否继续保存其他网页？(y/n): ").strip().lower() != 'y':
                print("程序已退出")
                break

        except Exception as e:
            print(f"保存失败：{str(e)}")
            continue