from playwright.async_api import async_playwright
from eroica import headers_dict, sync_headers, xpath, url_add
import asyncio


async def get_data(page, url):
    """Navigate *page* to *url* and scrape the detail-page fields.

    Uses the project ``xpath`` helper to pull title/label/blurb text
    out of the rendered HTML. Returns whatever ``xpath`` produces for
    the selector mapping below.
    """
    await page.goto(url)
    rendered = await page.content()
    selectors = {
        'title': '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/a/h2/font/font/text()',
        'lable': '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[1]/button/span/font/font/text()',
        'blurb': '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/font/font/text()',
    }
    return xpath(rendered, selectors)


async def main():
    """Crawl the antispider4 demo site.

    Loads the index page in a visible Chromium window, collects the
    detail-page links, then scrapes title/label/blurb from each detail
    page via :func:`get_data` and prints the results.
    """
    async with async_playwright() as p:
        url = 'https://antispider4.scrape.center/'
        # FIX: the original header capture was copied from antispider3 and
        # included `host: antispider3.scrape.center` plus stale
        # `if-modified-since` / `if-none-match` validators — the wrong Host
        # corrupts every request to antispider4, and stale validators can
        # yield bogus 304 responses. `connection` is a hop-by-hop header
        # that must not be injected via extra_http_headers. All four were
        # removed; Playwright sets Host and Connection itself.
        headers = headers_dict('''
        accept
        text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7
        accept-encoding
        gzip, deflate, br, zstd
        accept-language
        zh-CN,zh;q=0.9,zh-TW;q=0.8
        cache-control
        max-age=0
        referer
        https://scrape.center/
        sec-ch-ua
        "Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"
        sec-ch-ua-mobile
        ?0
        sec-ch-ua-platform
        "Windows"
        sec-fetch-dest
        document
        sec-fetch-mode
        navigate
        sec-fetch-site
        same-site
        sec-fetch-user
        ?1
        upgrade-insecure-requests
        1
        user-agent
        Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36
        ''')
        browser = await p.chromium.launch(headless=False)  # launch visible browser
        try:
            # One browser context carrying the custom headers on every request.
            context = await browser.new_context(extra_http_headers=sync_headers(headers))
            page = await context.new_page()
            await page.goto(url)  # load the index page
            html = await page.content()
            print(html)
            # Collect relative detail-page hrefs and resolve them against the base URL.
            hrefs = xpath(html, '//*[@id="index"]/div[1]/div[1]/div/div/div/div[2]/a/@href')
            for detail_url in url_add(url, hrefs):  # renamed: no longer shadows `url`
                data = await get_data(page, detail_url)
                print(data)
            # Pause so the visible browser can be inspected before teardown.
            input('是否退出？(y/n)')
        finally:
            # FIX: the original closed the browser only when the user typed
            # 'y'; always release it so the process can exit cleanly.
            await browser.close()

# FIX: guard the entry point so importing this module does not launch
# a browser as a side effect; the script behaves the same when run directly.
if __name__ == "__main__":
    asyncio.run(main())  # run the async entry point