import json
import os
import random
import threading
import time
from queue import Empty, Queue

from playwright.sync_api import sync_playwright
from tqdm import tqdm

from crawl_doc import init_browser

TARGET_URL = "https://www.szse.cn/disclosure/listed/bulletinDetail/index.html?{}"
MAX_WORKERS = 4  # 同时运行的线程数
SAVE_INTERVAL = 20  # 每处理20个项目保存一次进度
MAX_REQUESTS_PER_BROWSER = 80  # 每个浏览器最大请求数
MAX_RETRIES = 3  # 最大重试次数
RETRY_DELAY = 5  # 重试等待时间(秒)

def get_url_from_page(page):
    """Return the href of the announcement download button on *page*.

    Waits up to 60s for the button to become visible; logs and re-raises
    any failure so the caller's retry logic can handle it.
    """
    selector = "#annouceDownloadBtn"
    try:
        page.wait_for_selector(selector, state="visible", timeout=60000)
        return page.locator(selector).get_attribute("href")
    except Exception as e:
        print(f"获取下载按钮失败: {str(e)}")
        raise

def load_progress():
    """Return the set of attach ids already processed.

    Reads download_attachid.json if present; a missing file means a
    fresh run, so an empty set is returned.
    """
    checkpoint = "download_attachid.json"
    if not os.path.exists(checkpoint):
        return set()
    with open(checkpoint, "r") as f:
        return set(json.load(f))

def load_urls():
    """Return previously collected download-url records.

    Reads download_urls.json if present; returns an empty list when no
    checkpoint exists yet.
    """
    checkpoint = "download_urls.json"
    if not os.path.exists(checkpoint):
        return []
    with open(checkpoint, "r") as f:
        return json.load(f)

def save_progress(completed_ids, results):
    """Checkpoint scraper state to disk.

    Writes the completed-id set (as a JSON list) and the accumulated
    download-url records to their respective JSON files, overwriting
    any previous checkpoint.
    """
    snapshots = (
        ("download_attachid.json", list(completed_ids)),
        ("download_urls.json", results),
    )
    for path, payload in snapshots:
        with open(path, "w") as f:
            json.dump(payload, f, indent=2)

def get_download_urls(attach_ids):
    """Resolve the attachment download URL for every id in *attach_ids*.

    Spawns MAX_WORKERS browser threads that pull ids from a shared queue,
    open the bulletin-detail page and scrape the download link. Progress is
    checkpointed every SAVE_INTERVAL completions so an interrupted run can
    resume; ids finished by a previous run are skipped.

    Args:
        attach_ids: iterable of attachment-id query strings appended to
            TARGET_URL.

    Returns:
        list of {'attach_id': ..., 'download_url': ...} dicts, including
        records loaded from a previous partial run.
    """
    completed_ids = load_progress()
    print(f"已完成的任务数: {len(completed_ids)}")

    # Skip ids already finished by a previous run.
    attach_ids = [aid for aid in attach_ids if aid not in completed_ids]
    total_tasks = len(attach_ids)
    print(f"待处理的任务数: {total_tasks}")

    attach_ids_queue = Queue()
    for aid in attach_ids:
        attach_ids_queue.put(aid)

    results = load_urls()
    # Guards results, completed_ids, pbar and checkpoint writes.
    lock = threading.Lock()

    pbar = tqdm(total=total_tasks, desc="下载进度")

    def _open_session():
        # One playwright/browser/page bundle per worker session.
        playwright = sync_playwright().start()
        browser, context = init_browser(playwright)
        page = context.new_page()
        return playwright, browser, page

    def _close_session(playwright, browser):
        # Best-effort teardown; cleanup failures must not mask the real error.
        if browser is not None:
            try:
                browser.close()
            except Exception:
                pass
        if playwright is not None:
            try:
                playwright.stop()
            except Exception:
                pass

    def worker_with_progress(attach_ids_queue, results, completed_ids, lock):
        playwright = browser = page = None
        request_count = 0

        try:
            while True:
                # Recycle the browser once it has served enough requests.
                if page is not None and request_count >= MAX_REQUESTS_PER_BROWSER:
                    _close_session(playwright, browser)
                    playwright = browser = page = None
                    request_count = 0

                if page is None:
                    try:
                        playwright, browser, page = _open_session()
                    except Exception as e:
                        print(f"创建浏览器实例失败: {str(e)}")
                        _close_session(playwright, browser)
                        playwright = browser = page = None
                        time.sleep(RETRY_DELAY)  # avoid a tight failure loop
                        continue

                try:
                    attach_id = attach_ids_queue.get_nowait()
                except Empty:
                    # FIX: the original caught `Queue.empty` — a method, not an
                    # exception class — so draining the queue killed the worker
                    # with TypeError. `queue.Empty` is the real sentinel.
                    break

                try:
                    for attempt in range(1, MAX_RETRIES + 1):
                        try:
                            # Random delay so requests don't arrive in bursts.
                            time.sleep(random.uniform(1, 3))

                            page.goto(TARGET_URL.format(attach_id))
                            page.wait_for_load_state("networkidle", timeout=60000)
                            download_url = get_url_from_page(page)

                            with lock:
                                results.append({
                                    'attach_id': attach_id,
                                    'download_url': download_url
                                })
                                completed_ids.add(attach_id)
                                pbar.update(1)

                                if len(completed_ids) % SAVE_INTERVAL == 0:
                                    save_progress(completed_ids, results)

                            request_count += 1
                            break  # success — stop retrying this id
                        except Exception as e:
                            print(f"处理 attach_id {attach_id} 时出错 (尝试 {attempt}/{MAX_RETRIES}): {str(e)}")
                            if attempt >= MAX_RETRIES:
                                print(f"已达到最大重试次数，跳过 attach_id {attach_id}")
                                break
                            print(f"等待 {RETRY_DELAY} 秒后重试...")
                            time.sleep(RETRY_DELAY)

                            # A closed browser can't recover by retrying the same
                            # page object — rebuild the session. FIX: the original
                            # leaked the rebuilt browser because resetting
                            # request_count triggered a second creation at the
                            # top of the outer loop.
                            if "Target page, context or browser has been closed" in str(e):
                                _close_session(playwright, browser)
                                playwright = browser = page = None
                                request_count = 0
                                try:
                                    playwright, browser, page = _open_session()
                                except Exception as be:
                                    print(f"创建浏览器实例失败: {str(be)}")
                                    break  # outer loop will rebuild the session
                finally:
                    # FIX: mark every dequeued item done exactly once (the
                    # original only called task_done after MAX_RETRIES failures).
                    attach_ids_queue.task_done()
        finally:
            _close_session(playwright, browser)

    # Start the worker pool.
    threads = []
    for _ in range(MAX_WORKERS):
        thread = threading.Thread(
            target=worker_with_progress,
            args=(attach_ids_queue, results, completed_ids, lock)
        )
        thread.start()
        threads.append(thread)

    # Wait for all workers to drain the queue.
    for thread in threads:
        thread.join()

    pbar.close()

    # Final checkpoint so the last partial batch is not lost.
    save_progress(completed_ids, results)
    return results

if __name__ == "__main__":
    # result.json holds the list of attach ids gathered by the crawl step.
    with open("result.json", "r") as fp:
        ids_to_resolve = json.load(fp)
    results = get_download_urls(ids_to_resolve)