# # import random
# # import requests

# # def get_random_cn_headers():
# #     user_agents = [
# #         # Chrome on Windows
# #         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
# #         # Chrome on macOS
# #         "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
# #         # Edge on Windows
# #         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.31",
# #         # Chrome on Android
# #         "Mozilla/5.0 (Linux; Android 11; Pixel 5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Mobile Safari/537.36",
# #     ]

# #     headers = {
# #         "User-Agent": random.choice(user_agents),
# #         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# #         "Accept-Encoding": "gzip, deflate, br",
# #         "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
# #         "Connection": "keep-alive",
# #         "DNT": "1",
# #         "Upgrade-Insecure-Requests": "1"
# #     }

# #     return headers


# # session = requests.Session()
# # session.headers.update(get_random_cn_headers())

# # r = session.get("https://pb.eclicklink.com/5ZS8GM/7XDN2/")
# # pass #print(r.text)
# from curl_cffi import requests
# from urllib.parse import urljoin
# import random

# # 随机中文浏览器 UA
# def get_random_headers():
#     user_agents = [
#         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
#         "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
#     ]
#     return {
#         "User-Agent": random.choice(user_agents),
#         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#         "Accept-Language": "zh-CN,zh;q=0.9",
#         "Referer": "https://www.baidu.com/",
#     }

# # ✅ 跳转追踪核心函数
# def trace_redirect_chain(start_url, max_hops=10, proxy=None):
#     headers = get_random_headers()
#     chain = []
#     current_url = start_url

#     for i in range(max_hops):
#         pass #print(f"[{i+1}] 请求：{current_url}")
#         try:
#             resp = requests.get(
#                 current_url,
#                 impersonate="chrome120",
#                 headers=headers,
#                 allow_redirects=False,
#                 timeout=10,
#                 proxies={"http": proxy, "https": proxy} if proxy else None,
#                 # http2=True  ← 这个删掉
#             )
#             pass #print(resp.text)
#         except Exception as e:
#             pass #print(f"⚠️ 请求失败：{e}")
#             break

#         chain.append(current_url)

#         if not (300 <= resp.status_code < 400):
#             pass #print(f"✅ 跳转结束，状态码: {resp.status_code}")
#             break

#         location = resp.headers.get("Location")
#         if not location:
#             pass #print("❌ 找不到 Location 头，跳转链中断")
#             break

#         # 修正相对路径跳转
#         current_url = urljoin(current_url, location)

#     return chain

# # ✅ 示例调用
# if __name__ == "__main__":
#     url = "https://pb.eclicklink.com/5ZS8GM/7XDN2/"  # 你的追踪链接
#     proxy = None  # 示例: "http://127.0.0.1:7890"
#     chain = trace_redirect_chain(url, proxy=proxy)

#     pass #print("\n🔁 跳转链如下：")
#     for idx, u in enumerate(chain, 1):
#         pass #print(f"[{idx}] {u}")

from playwright.sync_api import sync_playwright
from datetime import datetime
from urllib.parse import urlparse
import json
import time
import requests
from fake_useragent import UserAgent


# Pick a random desktop User-Agent (the comment previously said "get proxy", which was wrong)
def get_ua():
    """Return a random desktop (Windows/macOS) User-Agent string.

    Keeps drawing from the fake_useragent pool until the UA mentions one
    of the common browser names, so obscure/bot-like UAs are skipped.

    Returns:
        str: a browser User-Agent string.
    """
    common_browsers = ('chrome', 'firefox', 'safari', 'edge', 'opera')
    # Build the UA pool once — the original constructed UserAgent(...)
    # on every loop iteration, reloading its data set each time.
    ua_pool = UserAgent(os=["windows", "macos"], platforms=["pc"])
    while True:
        candidate = ua_pool.random
        if any(browser in candidate.lower() for browser in common_browsers):
            return candidate

def trace_redirect_with_browser(url, proxy=None, headless=True):
    """Follow a redirect chain in a real Chromium browser.

    Unlike a plain HTTP client, this also captures JS- and meta-driven
    redirects, because every main-frame navigation request is recorded.

    Args:
        url: start URL to open.
        proxy: optional proxy server string (e.g. "http://127.0.0.1:7890").
        headless: whether to run Chromium headless.

    Returns:
        list[str]: every main-frame navigation URL in order, ending with
        the final page URL.
    """
    all_urls = []

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=headless)
        try:
            context_args = {}
            if proxy:
                context_args["proxy"] = {"server": proxy}

            context = browser.new_context(**context_args)
            page = context.new_page()

            # Record only top-level (main-frame) navigations, not
            # subresource requests.
            def on_request(request):
                if request.is_navigation_request():
                    all_urls.append(request.url)

            page.on("request", on_request)

            page.goto(url, timeout=30000, wait_until="load")

            # Give client-side JS up to 10 seconds to fire further redirects.
            for _ in range(10):
                time.sleep(1)

            # Ensure the final landing URL is part of the chain.
            if not all_urls or all_urls[-1] != page.url:
                all_urls.append(page.url)
        finally:
            # Close even when goto/navigation raises — the original leaked
            # the browser process on any exception before browser.close().
            browser.close()

    return all_urls

def get_proxy6(country="US"):
    """Fetch one SOCKS5 proxy for *country* from the ipwo.net API.

    Retries up to 5 times on HTTP errors, network failures, or
    unparseable responses, sleeping 1s between failed attempts.

    Args:
        country: region code passed to the API (default "US").

    Returns:
        dict | None: e.g. {'ip': '43.153.97.48', 'port': 10448}, or
        None when no proxy could be obtained within 5 attempts.
    """
    # Extraction endpoint docs: https://www.ipwo.net/user/ucenter/api_extraction
    url = (
        "https://www.ipwo.net/api/proxy/get_proxy_ip"
        f"?num=1&regions={country}&protocol=socks5&return_type=json&lb=4&sb="
    )
    for _ in range(5):
        try:
            # The original called requests.get OUTSIDE the try, so a
            # timeout/connection error crashed instead of being retried.
            resp = requests.get(url=url, timeout=5)
            if resp.status_code != 200:
                time.sleep(1)
                continue
            data_bean = resp.json()
        except (requests.RequestException, ValueError):
            # Network failure or non-JSON body: wait and retry.
            time.sleep(1)
            continue
        # code == 0 signals success; return the first proxy entry.
        if data_bean.get("code") == 0:
            for proxy in data_bean.get("data", []):
                return proxy  # {'ip': '43.153.97.48', 'port': 10448}
    return None

# 示例调用
# Example invocation: trace a tracking link through its browser redirects.
if __name__ == "__main__":
    start_url = "https://kn.kreatornow.com/kn/click?prop=1069331&brand_id=1004889&mark=%7B%7BDATETIME%7D%7D&dest=https%3A%2F%2Fwww.levi.com%2FUS%2Fen_US%2F"
    trace_redirect_with_browser(start_url, proxy=None, headless=True)