import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def download_image(image_url, save_path, timeout=10):
    """Download *image_url* to *save_path*, streaming in 1 KiB chunks.

    Failures (network errors, timeouts, or non-200 responses) are reported
    to stdout rather than raised, so one bad image does not abort a batch.

    Parameters:
        image_url: fully-qualified URL of the image to fetch.
        save_path: filesystem path the image bytes are written to.
        timeout: seconds to wait for the server before giving up
            (the original code had no timeout and could hang forever).
    """
    try:
        # Stream so large images are never held fully in memory; the context
        # manager releases the connection even if the download is interrupted.
        with requests.get(image_url, stream=True, timeout=timeout) as response:
            if response.status_code == 200:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(1024):
                        f.write(chunk)
                print(f"Image saved to {save_path}")
            else:
                print(f"Failed to download image from {image_url}")
    except Exception as e:
        # Best-effort: log and continue with the next image.
        print(f"Error downloading image: {e}")

def scrape_baidu_baike(url, save_dir, timeout=10):
    """Scrape the '第六期' (episode six) section of a Baidu Baike article and
    download each listed participant's photo into *save_dir*.

    Page structure assumed (NOTE(review): confirm against the live page):
    a <span> whose text contains '第六期' sits inside an <h2>, and the
    episode's content is the <div> sibling immediately following that <h2>.

    Parameters:
        url: Baike article URL to fetch.
        save_dir: directory for downloaded images (created if absent).
        timeout: seconds to wait for the page request before giving up.
    """
    headers = {
        # A desktop browser UA; Baidu serves stripped pages to unknown clients.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }

    try:
        response = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException as e:
        # Match the function's print-and-return error style instead of crashing.
        print(f"Failed to fetch URL: {url}")
        print(f"Error: {e}")
        return
    if response.status_code != 200:
        print(f"Failed to fetch URL: {url}")
        return

    soup = BeautifulSoup(response.text, 'html.parser')

    # Locate the heading span containing '第六期' (episode six).
    sixth_phase_section = soup.find('span', string=re.compile(r'第六期'))
    if sixth_phase_section is None:
        print("Couldn't find the '第六期' section.")
        return

    # Climb to the enclosing <h2>; the episode content is the next <div> sibling.
    parent_h2 = sixth_phase_section.find_parent('h2')
    if parent_h2 is None:
        print("Couldn't find the parent 'h2' for '第六期'.")
        return

    sixth_phase_content = parent_h2.find_next_sibling('div')
    if sixth_phase_content is None:
        print("Couldn't find the content for '第六期'.")
        return

    # Create the save directory up front so downloads can't fail on a
    # missing path.
    os.makedirs(save_dir, exist_ok=True)

    # Each <li> holds one participant: print their text, then save their photo.
    for li in sixth_phase_content.find_all('li'):
        person_info = li.get_text(strip=True)
        print(f"Participant Info: {person_info}")

        img_tag = li.find('img')
        if img_tag and 'src' in img_tag.attrs:
            # BUG FIX: the original code did "'https:' + src", which breaks
            # when src is already absolute ("https://...") or page-relative.
            # urljoin resolves absolute, scheme-relative ("//host/...") and
            # relative URLs correctly against the article URL.
            img_url = urljoin(url, img_tag['src'])
            # Fall back to a fixed name if the URL ends with '/'.
            img_name = img_url.split('/')[-1] or 'image'
            save_path = os.path.join(save_dir, img_name)
            download_image(img_url, save_path)

# Scrape the '第六期' (episode six) participant info from the Baidu Baike
# article for "The Voice of China" season one.
# Guarded so importing this module does not trigger a network scrape.
if __name__ == "__main__":
    url = "https://baike.baidu.com/item/%E4%B8%AD%E5%9B%BD%E5%A5%BD%E5%A3%B0%E9%9F%B3%E7%AC%AC%E4%B8%80%E5%AD%A3/16843195"
    save_directory = "./sixth_phase_participants"
    scrape_baidu_baike(url, save_directory)
