import re
import time

import requests
from playwright.sync_api import sync_playwright
def capsolver(siteurl, sitekey, *, max_attempts=180):
    """Solve a reCAPTCHA v2 challenge via the CapSolver API.

    Creates a ``ReCaptchaV2TaskProxyLess`` task, then polls
    ``getTaskResult`` once per second until the task is ready.

    Args:
        siteurl: URL of the page hosting the reCAPTCHA widget.
        sitekey: The site's reCAPTCHA public key.
        max_attempts: Maximum number of 1-second polls before giving up
            (keyword-only; default ~3 minutes). Prevents an infinite loop
            if the task never resolves.

    Returns:
        The ``gRecaptchaResponse`` token string on success, or ``None``
        if task creation, solving, or polling failed/timed out.
    """
    # SECURITY: hard-coded API key — move to an environment variable or config.
    api_key = "CAP-F4615BF085D8FE1CEFD3D279402C268A"
    payload = {
        "clientKey": api_key,
        "task": {
            "type": 'ReCaptchaV2TaskProxyLess',
            "websiteKey": sitekey,
            "websiteURL": siteurl
        }
    }
    # Explicit timeouts so a stalled connection cannot hang the caller forever.
    res = requests.post("https://api.capsolver.com/createTask",
                        json=payload, timeout=30)
    resp = res.json()
    task_id = resp.get("taskId")
    if not task_id:
        print("Failed to create task:", res.text)
        return None
    print(f"Got taskId: {task_id} / Getting result...")

    for _ in range(max_attempts):
        time.sleep(1)  # CapSolver recommends polling roughly once per second
        payload = {"clientKey": api_key, "taskId": task_id}
        res = requests.post("https://api.capsolver.com/getTaskResult",
                            json=payload, timeout=30)
        resp = res.json()
        status = resp.get("status")
        if status == "ready":
            return resp.get("solution", {}).get('gRecaptchaResponse')
        if status == "failed" or resp.get("errorId"):
            print("Solve failed! response:", res.text)
            return None
    print(f"Solve timed out after {max_attempts} polls")
    return None
if __name__ == '__main__':
    # Open an AliExpress product page through a remote (browserless.io) Chromium,
    # locate the reCAPTCHA frame and sitekey, solve the captcha with CapSolver,
    # and inject the resulting token via the page's validate callback.
    url = "https://www.aliexpress.us/item/3256807307914628.html?pdp_npi=4%40dis%21USD%21US%20%240.69%21US%20%240.69%21%21%210.69%210.69%21%40212bc76417237145253751518e8716%2112000041015913321%21sh%21US%212609591076%21X&spm=a2g0o.store_pc_allItems_or_groupList.new_all_items_2007586049610.1005007494229380&gatewayAdapt=glo2usa"
    with sync_playwright() as p:
        # SECURITY: browserless token is hard-coded in the URL — move to env/config.
        browser = p.chromium.connect('wss://production-sfo.browserless.io/chrome/playwright?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68&timeout=1800000&blockAds=true&launch={"stealth":true,"headless": false}')
        context = browser.new_context(viewport={"width": 3840, "height": 2160})
        first_page = context.new_page()
        first_page.goto(url, timeout=0, wait_until="domcontentloaded")
        # Reload once — the bot check is often only triggered on the second hit.
        first_page.reload(timeout=0, wait_until="domcontentloaded")
        first_page.wait_for_timeout(5000)
        first_page.screenshot(path="./download/list.jpg")

        # Raw-string pattern (the original non-raw "\." "\/" escapes raise
        # SyntaxWarning). Matches: sitekey:"<key chars...>
        sitekey_pattern = re.compile(r"sitekey:\"[\w\-.:/]*")
        sitekey = None       # reCAPTCHA public key scraped from frame markup
        fatherframe = None   # frame whose HTML contained the sitekey
        target_frame = None  # frame rendering the reCAPTCHA checkbox
        for frame in first_page.frames:
            # Snapshot the frame HTML once — frame.content() is a remote
            # round-trip, and the original fetched it up to three times.
            content = frame.content()
            matches = sitekey_pattern.findall(content)
            if matches:
                with open("./download/sitkey.txt", 'w') as f:
                    f.write(content)
                sitekey = matches[0][len('sitekey:"'):]
                fatherframe = frame
            checkbox = frame.locator('xpath=//span[contains(@class, "recaptcha-checkbox")]')
            if checkbox.count() > 0:
                with open("./download/recaptcha.txt", 'w') as f:
                    f.write(content)
                target_frame = frame
        if target_frame:
            print("找到校验")
            # Guarding on sitekey/fatherframe avoids the original NameError
            # when the checkbox frame exists but no sitekey was scraped.
            if sitekey and fatherframe:
                print(f"找到sitekey:{sitekey}")
                start = time.time()
                token = capsolver(url, sitekey)
                print(f"用时:{time.time() - start}获取到token：{token}")
                if token:
                    # Feed the solved token to the page's own validation
                    # callback instead of clicking the checkbox.
                    fatherframe.evaluate('__recaptchaValidateCB__("' + token + '");')
                    first_page.wait_for_timeout(3000)
                    first_page.screenshot(path="./download/afterclick.jpg")
