# demo_spider.py
import requests
from bs4 import BeautifulSoup
import csv
from urllib.parse import urlparse

def is_valid_url(url):
    """Return True when *url* parses with both a scheme and a host part.

    A bare domain like ``example.com`` fails because it has no scheme.
    """
    try:
        parts = urlparse(url)
    except ValueError:
        # urlparse raises ValueError on some malformed inputs (e.g. bad ports).
        return False
    return bool(parts.scheme) and bool(parts.netloc)

def fetch_web_content(url):
    """Download *url* and return the decoded body text, or None on any request failure."""
    # Browser-like User-Agent so sites that block default client strings respond.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
        # Prefer the encoding sniffed from the body over the header-declared one;
        # helps with pages whose charset header is missing or wrong.
        resp.encoding = resp.apparent_encoding
        return resp.text
    except requests.exceptions.RequestException as exc:
        print(f"请求失败: {exc}")
        return None

def parse_content(html):
    """Parse an HTML document and extract its title, paragraphs, and links.

    Args:
        html: Raw HTML text of the page.

    Returns:
        dict with keys:
            'title': page title text (fallback '无标题' when absent/empty),
            'paragraphs': list of stripped <p> texts,
            'links': list of href values from <a> tags that carry one.
    """
    soup = BeautifulSoup(html, 'html.parser')

    # Bug fix: soup.title.string is None when <title> is empty or contains
    # nested tags, so guarding only on soup.title still crashed on .strip().
    title_tag = soup.title
    title = title_tag.string.strip() if title_tag and title_tag.string else '无标题'

    return {
        'title': title,
        'paragraphs': [p.get_text().strip() for p in soup.find_all('p')],
        'links': [a['href'] for a in soup.find_all('a', href=True)],
    }

def save_to_csv(data, filename='output.csv'):
    """Persist the scraped data dict as a two-column (type, content) CSV.

    Expects *data* to have 'title' (str), 'paragraphs' (list of str), and
    'links' (list of str) keys, as produced by parse_content().
    """
    # Assemble every row up front, then emit them in a single writerows call.
    rows = [['类型', '内容'], ['标题', data['title']]]
    rows.extend(['段落', text] for text in data['paragraphs'])
    rows.extend(['链接', href] for href in data['links'])

    # newline='' is required so csv handles line endings itself;
    # utf-8 keeps the CJK labels intact.
    with open(filename, 'w', newline='', encoding='utf-8') as out:
        csv.writer(out).writerows(rows)

def main():
    """Validate the target URL, fetch the page, parse it, and save the CSV."""
    # Sample target — books.toscrape.com is a site built for scraping practice.
    target_url = 'http://books.toscrape.com/catalogue/page-1.html'

    if not is_valid_url(target_url):
        print("无效的URL")
        return

    print("开始抓取数据...")
    html = fetch_web_content(target_url)
    if not html:
        # Request failed (or returned an empty body); nothing to parse.
        return

    parsed_data = parse_content(html)
    save_to_csv(parsed_data)
    print(f"数据已保存到 {parsed_data['title'][:20]}... 的output.csv文件")

# Run the scraper only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
