# Listing page for SZSE (Shenzhen Stock Exchange) disclosure notices.
TARGET_URL = "https://www.szse.cn/disclosure/listed/notice/index.html"
# NOTE(review): appears unused anywhere in this file — confirm before removing.
DOWNLOAD_URLS = []
# Output filename template; formatted as result{start_page}-{end_page}.json.
RESULT_FILE = "result{}-{}.json"

# Scrape the site using the Playwright library.
from playwright.sync_api import sync_playwright
import json
from tqdm import tqdm
PROXY = '39.106.255.102'
# Proxy configuration in the format expected by Playwright's launch(proxy=...).
# SECURITY NOTE(review): credentials are hard-coded in source — consider
# loading them from environment variables instead.
proxy = {
    'server': f'http://{PROXY}:80',
    'username': 'Xniao',
    'password': '654321'
}

def init_browser(playwright):
    """Launch a proxied Chromium browser and open a fresh browser context.

    Args:
        playwright: A started Playwright driver (from ``sync_playwright()``).

    Returns:
        tuple: ``(browser, context)``; the caller is responsible for closing
        both when done.
    """
    browser = playwright.chromium.launch(
        # headless=False,  # uncomment to watch the browser while debugging
        args=['--start-maximized'],
        proxy=proxy,  # module-level proxy dict in Playwright's format
    )
    context = browser.new_context(
        # no_viewport=True disables the fixed-size viewport emulation so
        # --start-maximized takes effect; the original also passed
        # viewport=None, which is redundant alongside no_viewport=True.
        no_viewport=True,
        java_script_enabled=True,
        ignore_https_errors=True,
        user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
    )
    return browser, context


# Simulate the UI clicks that configure the search filters.
def set_search_params(page):
    """Select the notice category, enter the keyword, and submit the query."""
    page.locator("a").filter(has_text="请选择公告类别").click()
    page.get_by_text("中介机构报告").click()
    keyword_box = page.get_by_placeholder("代码/简称/拼音/标题关键字")
    keyword_box.click()
    keyword_box.fill("会计师事务所")
    page.get_by_role("button", name="查询").click()
    

def loop_table_data(page):
    """Collect the ``attachid`` attribute of every attachment link on the page.

    Args:
        page: Playwright page currently showing a disclosure results table.

    Returns:
        list: ``attachid`` values found on the current page; empty if the
        table never became visible or an error occurred (logged, not raised).
    """
    attach_ids = []
    try:
        table = page.locator(".disclosure-tbody")
        table.wait_for(state="visible", timeout=2000)
        # BUG FIX: removed a leftover page.pause() debugging call here — it
        # opened the Playwright inspector and blocked the crawl indefinitely
        # waiting for manual input.
        # Each attachment link in a row carries an attachid attribute.
        sub_page_urls = page.locator("a[attachid]").all()
        for sub_page_url in tqdm(sub_page_urls, desc="当前页面进度"):
            attach_ids.append(sub_page_url.get_attribute("attachid"))
    except Exception as e:
        print(f"获取表格数据时出错: {e}")
    return attach_ids

def _click_pager(page, selector):
    """Click a pager control via in-page JS, then wait for the table to reload."""
    page.evaluate(f"""() => {{
        document.querySelector("{selector}").click();
    }}""")
    page.wait_for_timeout(1000)  # wait for the next page of results to load


def _save_results(attach_ids, start_page, end_page):
    """Persist collected ids to the result file so progress survives a crash."""
    # encoding specified explicitly: the platform default (e.g. GBK on
    # Chinese-locale Windows) is not guaranteed to be UTF-8.
    with open(RESULT_FILE.format(start_page, end_page), "w", encoding="utf-8") as f:
        json.dump(attach_ids, f, indent=2)


def run_single_thread(start_page=1, end_page=30):
    """Single-threaded crawl of SZSE disclosure result pages.

    Opens the listing page, applies the search filters, navigates to
    ``start_page``, then walks pages ``start_page..end_page`` collecting
    attachment ids. Results are re-saved after every page.

    Args:
        start_page: First result page to scrape (1-based).
        end_page: Last result page to scrape (inclusive).

    Returns:
        list: All attachment ids collected across the scraped pages.
    """
    all_attach_ids = []

    with sync_playwright() as playwright:
        browser, context = init_browser(playwright)
        page = context.new_page()
        page.goto(TARGET_URL)

        page.wait_for_timeout(1500)
        set_search_params(page)

        # Navigate forward to start_page: the pager has a "skip 5 pages"
        # ellipsis control plus a single "next page" control.
        if start_page > 1:
            quick_jumps = (start_page - 1) // 5
            remaining_pages = (start_page - 1) % 5
            for _ in range(quick_jumps):
                _click_pager(page, "li.ellipsed.ellipsed-next a")
            for _ in range(remaining_pages):
                _click_pager(page, "li.next[data-show='next'] a")

        # Walk the requested page range, advancing one page at a time.
        for page_num in tqdm(range(start_page, end_page + 1), desc="总体爬取进度"):
            try:
                if page_num > start_page:
                    _click_pager(page, "li.next[data-show='next'] a")

                all_attach_ids.extend(loop_table_data(page))

                # Save after every page so a mid-run failure loses nothing.
                _save_results(all_attach_ids, start_page, end_page)
                print(f"第{page_num}页完成，当前共获取到 {len(all_attach_ids)} 个下载链接")

            except Exception as e:
                print(f"爬取第{page_num}页时出错: {e}")
                _save_results(all_attach_ids, start_page, end_page)
                break

        context.close()
        browser.close()

    print(f"爬取完成，共获取到 {len(all_attach_ids)} 个下载链接")
    return all_attach_ids

# Program entry point.
if __name__ == "__main__":
    # Multi-threaded variant — NOTE(review): run_multi_thread is not defined
    # anywhere in this file, so uncommenting this line as-is would raise
    # NameError. Confirm where it lives before enabling.
    # run_multi_thread(start_page=41, end_page=70)
    
    # Single-threaded crawl of result pages 71 through 170.
    run_single_thread(start_page=71, end_page=170)

