from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
import time

def fetch_content(page, url):
    """Navigate *page* to *url* and return the rendered HTML, or None on failure.

    Waits until the network is idle so JavaScript-rendered content is present.
    Errors are reported to stdout (best-effort crawl) instead of being raised.
    """
    html = None
    try:
        page.goto(url, wait_until="networkidle")
        html = page.content()
    except Exception as e:
        print(f"Error fetching {url}: {e}")
    return html

def parse_and_save(content, file):
    """Extract all <p> text from the HTML in *content* and append it to *file*.

    Each non-empty paragraph is written followed by a blank line; empty
    paragraphs are skipped.  Does nothing when *content* is falsy (e.g. the
    fetch failed and returned None).
    """
    if not content:
        return
    soup = BeautifulSoup(content, 'html.parser')
    # Paragraph tags are the unit of extraction for these article pages.
    for paragraph in soup.find_all('p'):
        text = paragraph.get_text().strip()
        if text:
            file.write(text + '\n\n')

def crawl_website(page, url, file):
    """Fetch *url* via *page* and append its paragraph text to *file*.

    Writes a source-header line before the extracted text so the output
    file shows where each section came from; skips the URL entirely when
    fetching fails (fetch_content returned a falsy value).
    """
    html = fetch_content(page, url)
    if not html:
        return
    file.write(f"\n\n--- Content from {url} ---\n\n")
    parse_and_save(html, file)

def main():
    """Crawl a fixed list of Shenzhen government employment pages and save
    their paragraph text to ``graduate_employment_info.txt``.

    Launches a headless Chromium via Playwright with a desktop user agent,
    visits each URL in sequence with a polite delay between requests, and
    always closes the browser even if writing the output file fails.
    """
    websites = [
        "https://hrss.sz.gov.cn/xxgk/zcfgjjd/zcfg/jycy/content/post_11076312.html",  # Shenzhen Human Resources and Social Security Bureau
        "https://www.szlh.gov.cn/zwgk/zdlyxxgk/jycy/zczy/content/post_11215617.html",  # Shenzhen talent network
        "https://www.szns.gov.cn/bsfw/ztfw/jyzt/zpxx/content/post_11418859.html",  # Shenzhen government notices/announcements
        "https://www.szgm.gov.cn/szgm/132104/132207/_541168/_541195/content/post_10071821.html",  # Shenzhen government policies and regulations
    ]

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        try:
            # Desktop UA so the sites serve the regular (non-mobile/anti-bot) page.
            context = browser.new_context(
                user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            )
            page = context.new_page()

            with open('graduate_employment_info.txt', 'w', encoding='utf-8') as file:
                for i, website in enumerate(websites):
                    crawl_website(page, website, file)
                    # Throttle between requests to avoid hammering the servers;
                    # no sleep needed after the final URL.
                    if i < len(websites) - 1:
                        time.sleep(2)
        finally:
            # Guarantee the browser process is released even if crawling or
            # file writing raised; previously close() was skipped on error.
            browser.close()

    print("爬取完成，数据已保存到 graduate_employment_info.txt")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()