import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
from urllib.request import urlretrieve
import threading


class WebsiteScraper:
    """Recursively mirror a static website into a local directory.

    Starting from ``base_url``, pages are fetched, their resource links
    (scripts, stylesheets, images, ...) are downloaded and rewritten to
    relative local paths, and same-site ``<a href>`` links are queued for
    crawling.
    """

    def __init__(self, base_url, output_dir='downloaded_site'):
        # base_url: root URL to start from (trailing '/' stripped so
        # urljoin behaves consistently).
        # output_dir: directory the mirrored site is written into.
        self.base_url = base_url.rstrip('/')
        self.output_dir = output_dir
        self.visited = set()    # URLs already processed
        self.links = set()      # discovered same-site URLs still to crawl
        self.resources = []     # local paths of successfully downloaded assets
        os.makedirs(output_dir, exist_ok=True)

    def _local_path(self, url):
        """Map *url* to a safe file path inside ``output_dir`` and ensure
        its parent directory exists.

        Bug fixes vs. the original ``save_page`` logic:
        * the leading '/' is stripped, because ``os.path.join(dir, '/x')``
          returns ``/x`` and would write OUTSIDE the output directory;
        * empty and directory-style paths ('', '/', '/docs/') fall back to
          'index.html' instead of producing an unopenable path.
        """
        path = urlparse(url).path
        if not path or path.endswith('/'):
            path += 'index.html'
        file_path = os.path.join(self.output_dir, path.lstrip('/'))
        # dirname may be '' for a file directly in the current directory.
        os.makedirs(os.path.dirname(file_path) or '.', exist_ok=True)
        return file_path

    def save_page(self, url, content):
        """Write text *content* to the local path derived from *url*;
        return that path."""
        file_path = self._local_path(url)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        return file_path

    def download_resource(self, url, file_path):
        """Stream a binary resource from *url* into *file_path*.

        Best-effort: failures are logged, not raised, so one broken asset
        cannot abort the crawl.
        """
        try:
            # Bug fix: add a timeout so a stalled server cannot hang the
            # crawl forever, and reject HTTP error responses instead of
            # saving their bodies as assets.
            response = requests.get(url, stream=True, timeout=10)
            response.raise_for_status()
            with open(file_path, 'wb') as f:
                for chunk in response.iter_content(1024):
                    f.write(chunk)
            self.resources.append(file_path)
        except Exception as e:
            print(f"下载资源失败: {url}, 错误: {str(e)}")

    def process_page(self, url):
        """Fetch one HTML page: download its resources, rewrite their
        links to relative local paths, save the rewritten HTML, and queue
        same-site links for later crawling.

        Bug fixes vs. the original:
        * relative resources are actually downloaded (previously an empty
          placeholder file was written and the download never happened);
        * the attribute that held the URL (src OR href) is the one
          rewritten (previously 'src' was always set, which is wrong for
          <link href=...> tags);
        * the REWRITTEN soup is saved — the original saved the raw
          response text before rewriting, discarding every rewrite;
        * download threads are joined before the page is written.
        """
        if url in self.visited:
            return

        print(f"正在处理: {url}")
        self.visited.add(url)

        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
            html_path = self._local_path(url)

            # Download resources (in threads) and point tags at the local copies.
            threads = []
            for tag in soup.find_all(['link', 'script', 'img', 'style',
                                      'iframe', 'source', 'video']):
                attr = 'src' if tag.has_attr('src') else (
                    'href' if tag.has_attr('href') else None)
                if attr is None:
                    continue
                src = tag[attr]
                if not src or src.startswith('data:'):
                    continue  # inline data URIs need no download
                resource_url = urljoin(url, src)
                local_path = self._local_path(resource_url)
                t = threading.Thread(target=self.download_resource,
                                     args=(resource_url, local_path))
                t.start()
                threads.append(t)
                tag[attr] = os.path.relpath(local_path,
                                            os.path.dirname(html_path))

            # Queue same-site links (relative hrefs only, as before;
            # absolute http(s) links and fragments are skipped).
            for a_tag in soup.find_all('a', href=True):
                href = a_tag['href']
                if not href or href.startswith(('http:', 'https:', '#')):
                    continue
                full_url = urljoin(url, href)
                if full_url not in self.visited:
                    self.links.add(full_url)

            # Wait for resource downloads so the saved page is complete.
            for t in threads:
                t.join()

            # Save the rewritten HTML.
            self.save_page(url, str(soup))
        except Exception as e:
            print(f"处理页面失败: {url}, 错误: {str(e)}")

    def start(self):
        """Crawl from ``base_url`` until no queued links remain, then
        print a summary."""
        print(f"开始抓取: {self.base_url}")
        self.process_page(self.base_url)

        while self.links:
            next_url = self.links.pop()
            self.process_page(next_url)

        print("抓取完成！共处理页面:", len(self.visited))
        print("下载资源:", len(self.resources))


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="静态网站抓取工具")
    parser.add_argument("url", help="目标网站URL")
    # Bug fix: the original called WebsiteScraper(args.url, args.output or
    # None), which explicitly passed None when -o was omitted, overriding
    # the constructor's 'downloaded_site' default and crashing in
    # os.makedirs(None). Let argparse supply the default instead.
    parser.add_argument("-o", "--output", default="downloaded_site",
                        help="输出目录，默认为downloaded_site")
    args = parser.parse_args()

    scraper = WebsiteScraper(args.url, args.output)
    scraper.start()
