import time

from playwright.sync_api import Playwright, sync_playwright, expect
from bs4 import BeautifulSoup as bs
import requests


def run(playwright: Playwright) -> None:
    """Scrape live-room IDs from douyin.com's recommendation feed and
    trigger the local spider API once per discovered room.

    Launches a visible Chromium window, scrolls the feed to force lazy
    loading, parses the rendered HTML for anchors of the form
    ``https://live.douyin.com/<numeric id>``, and GETs the local
    ``spider_script_run`` endpoint for each id found.

    Args:
        playwright: An active sync Playwright instance.
    """
    browser = playwright.chromium.launch(headless=False)
    context = browser.new_context()
    try:
        page = context.new_page()
        page.goto("https://live.douyin.com/")
        page.get_by_role("heading", name="推荐直播").click()
        # Scroll repeatedly so lazily-loaded rooms get inserted into the DOM.
        for _ in range(10):  # adjust the iteration count as needed
            page.mouse.wheel(0, 1000)
            page.wait_for_timeout(1000)  # allow new content to load after each scroll

        soup = bs(page.content(), 'html.parser')

        # Collect the parent containers of live-room anchors; the parent div
        # presumably also holds the streamer name / viewer count markup —
        # TODO confirm against the live page structure.
        cate_html_list = []
        for a in soup.find_all('a', href=True):
            href = a['href']
            if "https://live.douyin.com/" in href and href.split('/')[-1].isdigit():
                parent_div = a.find_parent('div')
                # Guard: an anchor with no div ancestor returns None, which
                # would crash the .find_all() below.
                if parent_div is not None:
                    cate_html_list.append(parent_div)

        # De-duplicate containers while preserving first-seen order
        # (bs4 Tags hash/compare by their markup, so this dedupes reliably).
        cate_html_list = list(dict.fromkeys(cate_html_list))

        top_live_ids = []
        for cate_html in cate_html_list:
            for link in cate_html.find_all('a', href=True):
                href = link['href']
                if "https://live.douyin.com/" not in href:
                    continue
                live_id = href.split('/')[-1]
                try:
                    # Fire-and-forget trigger for the local spider service; a
                    # bounded timeout keeps one dead endpoint from hanging the
                    # entire scrape.
                    requests.get(
                        f"http://127.0.0.1:8000/api/dy/spider_script_run?live_id={live_id}",
                        timeout=10,
                    )
                except requests.RequestException as exc:
                    # Best-effort: log and keep going instead of aborting the run.
                    print(f"spider trigger failed for live_id={live_id}: {exc}")
                top_live_ids.append(live_id)
    finally:
        # Always release the browser, even if scraping raised mid-way.
        context.close()
        browser.close()


if __name__ == "__main__":
    # Guard the entry point so importing this module does not launch a browser.
    with sync_playwright() as playwright:
        run(playwright)
