import csv
import time
from collections import deque
from urllib.parse import urljoin, urlparse
from urllib.robotparser import RobotFileParser

import requests
from bs4 import BeautifulSoup

class SimpleWebCrawler:
    """A polite same-domain breadth-first crawler.

    Honors robots.txt, rate-limits real requests, extracts headers and
    paragraphs from each page, and appends them to a CSV file.
    """

    def __init__(self, base_url):
        """Create a crawler rooted at *base_url* and load its robots.txt.

        If robots.txt cannot be fetched, ``self.rp`` is set to None and
        crawling proceeds unrestricted (deliberate best-effort).
        """
        self.base_url = base_url
        self.visited = set()  # URLs already fetched successfully
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

        # Parse robots.txt once up front so is_allowed() is cheap per URL.
        self.rp = RobotFileParser()
        robots_url = urljoin(base_url, '/robots.txt')
        self.rp.set_url(robots_url)
        try:
            self.rp.read()
        except Exception as e:
            print(f"无法获取robots.txt: {e}")
            self.rp = None

    def is_allowed(self, url):
        """Return True if robots.txt permits fetching *url* for any agent."""
        if self.rp is None:
            return True  # no robots.txt available: allow everything
        return self.rp.can_fetch('*', url)

    def fetch(self, url, delay=1):
        """Fetch *url* and return its HTML text, or None.

        Returns None immediately (no sleep) when the URL was already
        visited, is disallowed by robots.txt, or the request fails.
        The crawl delay is applied only before an actual network request,
        so skipped URLs no longer waste *delay* seconds each.
        """
        if url in self.visited or not self.is_allowed(url):
            return None
        time.sleep(delay)  # rate-limit only real requests
        try:
            response = self.session.get(url, timeout=10)
            response.raise_for_status()
            self.visited.add(url)  # mark only after a successful fetch
            return response.text
        except requests.exceptions.RequestException as e:
            print(f"请求失败: {url} - {e}")
            return None

    def parse(self, html, current_url):
        """Parse *html* into extracted records and same-domain links.

        Returns a ``(data, links)`` tuple: *data* is a list of
        ``{'type', 'content'}`` dicts for h1-h3 headers and paragraphs;
        *links* contains absolute URLs restricted to base_url's host.
        """
        soup = BeautifulSoup(html, 'html.parser')
        data = []

        # Extract all h1-h3 headers and all paragraphs.
        for header in soup.find_all(['h1', 'h2', 'h3']):
            data.append({
                'type': 'header',
                'content': header.text.strip()
            })

        for paragraph in soup.find_all('p'):
            data.append({
                'type': 'paragraph',
                'content': paragraph.text.strip()
            })

        # Collect links, resolved against the current page and limited to
        # the crawl's own domain so we never wander off-site.
        links = []
        base_host = urlparse(self.base_url).netloc
        for link in soup.find_all('a', href=True):
            absolute_url = urljoin(current_url, link['href'])
            if urlparse(absolute_url).netloc == base_host:
                links.append(absolute_url)

        return data, links

    def save_to_csv(self, data, filename='output.csv'):
        """Append each record in *data* as a (type, content) CSV row."""
        with open(filename, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            for item in data:
                writer.writerow([item['type'], item['content']])

    def crawl(self, max_pages=3):
        """Breadth-first crawl up to *max_pages* pages from base_url."""
        queue = deque([self.base_url])  # O(1) popleft vs list.pop(0)'s O(n)
        enqueued = {self.base_url}      # everything ever queued, to dedupe
        count = 0

        while queue and count < max_pages:
            current_url = queue.popleft()

            html = self.fetch(current_url)
            if html:
                data, links = self.parse(html, current_url)
                self.save_to_csv(data)

                # Enqueue new links exactly once: the original filtered
                # only against `visited`, so a URL linked from several
                # pages entered the queue multiple times.
                for link in links:
                    if link not in self.visited and link not in enqueued:
                        enqueued.add(link)
                        queue.append(link)
                count += 1
                print(f"已抓取: {current_url}")

if __name__ == "__main__":
    # Example usage (be sure to respect the target site's rules).
    target = 'http://120.24.224.148/'
    SimpleWebCrawler(target).crawl(max_pages=2)