#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :crawler.py
# @Time      :2025/7/12 18:46
# @Author    :aiyimu

import threading
import requests
from bs4 import BeautifulSoup
import queue


class Crawler:
    """Simple multi-threaded breadth-first web crawler.

    Worker threads pull URLs from a shared queue, fetch each page with
    ``requests``, parse it with BeautifulSoup, and enqueue every unseen
    absolute link they find. ``self.visited`` is the claim set: a URL is
    added the moment a worker takes ownership of it, which prevents two
    workers from fetching the same page.
    """

    def __init__(self, start_url, max_threads=5, max_pages=None):
        """
        :param start_url: URL the crawl begins from.
        :param max_threads: number of worker threads to spawn.
        :param max_pages: optional cap on the number of pages to fetch;
                          ``None`` means unbounded (the previous behaviour).
        """
        self.visited = set()            # URLs already claimed by a worker
        self.queue = queue.Queue()      # frontier of URLs still to fetch
        self.queue.put(start_url)
        self.max_threads = max_threads
        self.max_pages = max_pages
        self.lock = threading.Lock()    # guards self.visited

    def crawl(self):
        """Worker loop: fetch URLs until a ``None`` sentinel arrives.

        The original version exited via a 1-second ``queue.Empty`` timeout,
        which raced with start()'s sentinels and could feed ``None`` into
        ``requests.get``. Blocking on get() and handling the sentinel
        explicitly makes shutdown deterministic.
        """
        while True:
            url = self.queue.get()
            if url is None:
                # Shutdown sentinel placed by start(): acknowledge and exit.
                self.queue.task_done()
                break

            # Claim the URL atomically: check-and-add under the lock so two
            # workers can never fetch the same page. (The original marked a
            # URL visited only *after* fetching it, so concurrent workers
            # could both grab the same URL and fetch it twice.)
            with self.lock:
                if url in self.visited or (
                    self.max_pages is not None
                    and len(self.visited) >= self.max_pages
                ):
                    self.queue.task_done()
                    continue
                self.visited.add(url)

            print(f"抓取: {url}")
            try:
                # Timeout so a stalled server cannot hang a worker forever.
                response = requests.get(url, timeout=10)
                soup = BeautifulSoup(response.text, 'html.parser')

                # Process page content...
                print(f"标题: {soup.title.string if soup.title else '无标题'}")

                # Discover new absolute links and enqueue unseen ones.
                for link in soup.find_all('a'):
                    href = link.get('href')
                    if href and href.startswith('http'):
                        with self.lock:
                            if href not in self.visited:
                                self.queue.put(href)

            except Exception as e:
                print(f"抓取 {url} 出错: {e}")
            finally:
                # Always balance the get() so queue.join() can return even
                # when parsing raises.
                self.queue.task_done()

    def start(self):
        """Run the crawl to completion and report how many pages were visited."""
        threads = []
        for _ in range(self.max_threads):
            t = threading.Thread(target=self.crawl)
            t.start()
            threads.append(t)

        # Wait until every enqueued URL has been processed...
        self.queue.join()

        # ...then wake each worker with one sentinel so it exits cleanly.
        for _ in range(self.max_threads):
            self.queue.put(None)

        for t in threads:
            t.join()

        print(f"抓取完成，共访问了 {len(self.visited)} 个页面")


if __name__ == "__main__":
    # Entry point: crawl starting from the example domain.
    Crawler("https://example.com").start()