import requests
from lxml import etree
import concurrent.futures


def fetch_page(num):
    """Scrape one results page of the Beijing court enforcement listing.

    Parameters
    ----------
    num : int
        1-based page number to request.

    Returns
    -------
    list[dict]
        One dict per row with keys ``name``, ``id_card``, ``case_num``,
        ``court``, ``time``. Each row is also printed and appended to
        ``data.txt`` as a CSV-ish line. Returns ``[]`` on any error
        (network failure, bad status, parse problem) so the caller's
        "all pages empty" stop condition keeps working.
    """
    url = (
        f"https://www.bjcourt.gov.cn/zxxx/indexOld.htm?st=1&zxxxlx=100013007&bzxrlx=&bzxrxm=&zrr=&frhqtzz=&jbfyId=&ah"
        f"=&dqxh=26&page={num}")
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/135.0.0.0",
        "referer": "https://www.bjcourt.gov.cn/"
    }
    try:
        # timeout prevents a stalled connection from hanging the worker
        # thread forever; raise_for_status surfaces 4xx/5xx instead of
        # silently parsing an error page as "no rows".
        req = requests.get(url, headers=headers, timeout=10)
        req.raise_for_status()
        html = etree.HTML(req.text)
        names = html.xpath("//td[@class='td_even'][1]/text()")
        id_cards = html.xpath("//td[@class='td_even'][2]/text()")
        case_nums = html.xpath("//td[@class='td_even']/following-sibling::td[1]/text()")
        courts = html.xpath("//td[@class='td_even']/following-sibling::td[2]/text()")
        times = html.xpath("//td[@class='td_even']/following-sibling::td[3]/text()")

        # zip truncates to the shortest list, so a partially-parsed row
        # can never raise IndexError the way parallel indexing could.
        data = [
            {
                "name": name.strip(),
                "id_card": id_card.strip(),
                "case_num": case_num.strip(),
                "court": court.strip(),
                "time": time.strip(),
            }
            for name, id_card, case_num, court, time
            in zip(names, id_cards, case_nums, courts, times)
        ]

        if data:
            # Open the output file once per page, not once per row.
            with open("data.txt", "a", encoding="utf-8") as f:
                for item in data:
                    print(item)
                    f.write(f"{item['name']},{item['id_card']},{item['case_num']},{item['court']},{item['time']}\n")
        return data
    except Exception as e:
        # Boundary handler: log and return [] so one bad page does not
        # kill the whole crawl.
        print(f"爬取第 {num} 页时出错: {e}")
        return []


if __name__ == "__main__":
    # Crawl pages in batches of `pool_size`, stopping after the first
    # batch in which every page came back empty (end of the listing).
    pool_size = 5
    first_page = 1
    with concurrent.futures.ThreadPoolExecutor(max_workers=pool_size) as executor:
        while True:
            batch = range(first_page, first_page + pool_size)
            pending = [executor.submit(fetch_page, page) for page in batch]
            done = concurrent.futures.as_completed(pending)
            page_results = [fut.result() for fut in done]
            # Every page in this batch was empty (or failed) -> no more data.
            if not any(page_results):
                print("爬取结束")
                break
            first_page += pool_size