import requests
from bs4 import BeautifulSoup
import os

# 定义要访问的URL列表
# URLs to fetch.
urls = [
    "https://detail.zol.com.cn/cpu_advSearch/subcate28_1.html",
    "https://www.2345.com/",
    "https://music.163.com/"
]

# For each URL: download the page, save the raw HTML, then extract the
# title and all paragraph text into a companion "_data.txt" file.
for url in urls:
    try:
        # Send the HTTP GET request. A timeout is required: without one,
        # requests will wait indefinitely on an unresponsive server.
        response = requests.get(url, timeout=10)
        # Raise requests.exceptions.HTTPError for 4xx/5xx status codes
        # (caught below as a RequestException).
        response.raise_for_status()

        # Derive an output file name from the URL's last path segment;
        # when the URL ends with "/" that segment is empty, so fall back
        # to the previous segment (the host name).
        filename = url.split("/")[-1]
        if not filename:
            filename = url.split("/")[-2]
        # Strip any existing extension so a URL ending in ".html" does
        # not produce "name.html.html".
        base, _ = os.path.splitext(filename)

        # Save the page content to a file.
        # BUG FIX: the original wrote every page to one hard-coded file
        # name, so each iteration overwrote the previous page's output.
        # Using the per-URL `base` gives every page its own files.
        html_filename = f"{base}.html"
        with open(html_filename, "w", encoding="utf-8") as file:
            file.write(response.text)

        print(f"成功保存 {url} 的内容到 {html_filename}")

        # Parse the HTML content.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract the page title (pages without a <title> get a marker).
        title = soup.title.string if soup.title else "No title found"
        print(f"网页标题: {title}")

        # Extract the text of every <p> element, whitespace-stripped.
        paragraphs = soup.find_all('p')
        paragraph_texts = [p.get_text(strip=True) for p in paragraphs]

        # Save the extracted title and paragraphs to a data file.
        data_filename = f"{base}_data.txt"
        with open(data_filename, "w", encoding="utf-8") as data_file:
            data_file.write(f"Title: {title}\n\n")
            data_file.write("Paragraphs:\n")
            for i, text in enumerate(paragraph_texts, 1):
                data_file.write(f"{i}. {text}\n")

        print(f"成功保存 {url} 的数据到 {data_filename}")

    except requests.exceptions.RequestException as e:
        # Network errors, timeouts, and bad HTTP statuses all land here;
        # log and continue with the next URL (best-effort scraping).
        print(f"访问 {url} 时出错: {e}")