from operator import truediv
import asyncio

from playwright.async_api import async_playwright
from playwright.sync_api import sync_playwright
async def safe_goto(page, url, retries=3, wait=1000):
    """Navigate *page* to *url* with retries.

    Args:
        page: Playwright async-API page object.
        url: Destination URL.
        retries: Maximum number of navigation attempts.
        wait: Milliseconds to pause between failed attempts.

    Returns:
        bool: True as soon as one navigation succeeds, False after all
        attempts have failed.
    """
    for i in range(retries):
        try:
            await page.goto(url, timeout=30000)
            return True
        except Exception as e:
            print(f"第 {i+1} 次访问失败: {e}")
            # BUG FIX: only pause when another attempt is coming — the
            # original also slept after the final failure, wasting `wait` ms.
            if i < retries - 1:
                await page.wait_for_timeout(wait)
    return False

async def scrape_table():
    """Scrape the rows of the main-content table and return them.

    Launches a visible Chromium instance, navigates (with retries) to the
    target page, waits for the table body to render, and extracts one
    record per row.

    Returns:
        list[dict]: one dict per row with keys "index", "text",
        "icon_url", "link_href", "downloads", "revenue". Empty list if
        navigation fails.
    """
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=False)
        try:
            # BUG FIX: new_page() is a coroutine in the async API and must
            # be awaited — the original assigned the bare coroutine object.
            page = await browser.new_page()

            # Use the retrying helper instead of a bare goto so a transient
            # network error does not abort the whole scrape.
            if not await safe_goto(page, "https://www.baidu.com"):
                return []
            # NOTE(review): baidu.com has no "#mainContent table" — the URL
            # looks like a placeholder; confirm the real target page.

            await page.wait_for_selector("#mainContent table tbody tr")
            body = await page.query_selector("#mainContent table tbody")

            rows = await body.query_selector_all("tr")
            table_data = []
            for index, row in enumerate(rows, start=1):
                # BUG FIX: inner_text() is a coroutine and must be awaited.
                text = await row.inner_text()

                # BUG FIX: get_attribute() takes an attribute NAME, not a
                # selector. Query the <img> inside the first cell directly,
                # then read its "src" attribute.
                icon_el = await row.query_selector("td:nth-child(1) img")
                icon_url = (await icon_el.get_attribute("src") or "") if icon_el else ""

                # TODO: replace this placeholder selector with the real
                # selector for the row's link element.
                link_el = await row.query_selector("选择某个具体的元素")
                link_href = (await link_el.get_attribute("href") or "") if link_el else ""

                # Defaults until download/revenue extraction is implemented.
                downloads = "N/A"
                revenue = "N/A"

                # BUG FIX: the original created table_data but never
                # appended to it or returned it.
                table_data.append({
                    "index": index,
                    "text": text,
                    "icon_url": icon_url,
                    "link_href": link_href,
                    "downloads": downloads,
                    "revenue": revenue,
                })
            return table_data
        finally:
            # BUG FIX: the browser was never closed, leaking the Chromium
            # process on every run.
            await browser.close()
# BUG FIX: guard the entry point so importing this module does not launch
# a browser and start scraping as a side effect.
if __name__ == "__main__":
    asyncio.run(scrape_table())
