import json
import asyncio
from playwright.async_api import async_playwright

def _parse_app_info(app_info):
    """Map the newline-separated text of an app cell to named fields.

    The cell's inner text is split on newlines; blank lines are dropped.
    Missing trailing fields default to "" so every item has the same keys.
    """
    parts = [line.strip() for line in app_info.split("\n") if line.strip()]
    keys = ("name", "publisher", "downloads", "rank_change", "price_info")
    return {key: parts[i] if i < len(parts) else "" for i, key in enumerate(keys)}


async def scrape_table():
    """Scrape the top-charts table and save all rows to apps.json.

    Opens the SensorTower top-charts page, scrolls until no new content
    loads, parses the second column of every table row, and dumps the
    collected records as pretty-printed UTF-8 JSON.
    """
    async with async_playwright() as p:
        # headless=False: page appears to require a visible browser session
        # (likely anti-bot); NOTE(review) — confirm before flipping to True.
        browser = await p.chromium.launch(headless=False)
        try:
            page = await browser.new_page()
            await page.goto(
                "https://app.sensortower-china.com/top-charts?country=US&category=0&date=2025-09-01&device=iphone&os=ios"
            )

            # Wait for the table to be rendered before interacting.
            await page.wait_for_selector("#mainContent table tbody tr")

            # ---- Auto-scroll until all lazy-loaded rows are present ----
            last_height = 0
            while True:
                current_height = await page.evaluate("document.body.scrollHeight")
                if current_height == last_height:
                    break  # page height stopped growing -> no more data
                last_height = current_height
                await page.evaluate("window.scrollBy(0, document.body.scrollHeight)")
                await page.wait_for_timeout(1000)  # give new rows time to load

            # ---- Extract every table row ----
            rows = await page.query_selector_all("#mainContent table tbody tr")
            data = []
            for row in rows:
                cells = await row.query_selector_all("td")
                if len(cells) < 2:
                    continue  # skip header/placeholder rows without an app cell

                # Only the 2nd column (app info) carries the fields we need.
                data.append(_parse_app_info(await cells[1].inner_text()))

            # ---- Persist as JSON ----
            with open("apps.json", "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=4)

            print(f"已抓取 {len(data)} 条记录，保存到 apps.json")
        finally:
            # Always release the browser process, even if scraping failed.
            await browser.close()

# Guard the entry point so importing this module does not launch a browser.
if __name__ == "__main__":
    asyncio.run(scrape_table())