import re
import requests

import sys

sys.path.append("/link/universal-template/project")


from bs4 import BeautifulSoup

from linkutil.checkUtil import check_url
from linkutil.getProxy import get_proxy6, get_proxy4, get_proxy5, get_ua

from playwright.sync_api import sync_playwright
from playwright.async_api import async_playwright
from datetime import datetime
from urllib.parse import urlparse
import json
import time
from fake_useragent import UserAgent
import asyncio
import logging
logger = logging.getLogger('link')  # 用你项目里的 logger 名称




def get_redirect_trace1(url, header, proxies=None):
    """
    Follow a URL's redirect chain using plain HTTP requests (no browser).

    Each hop is discovered from, in order: an HTTP 3xx ``Location`` header,
    the first ``<a href>`` link in the body ("click here to continue" pages),
    a ``<meta http-equiv="refresh">`` target, and any
    ``window.location.replace('...')`` call inside ``<script>`` tags.

    Args:
        url: starting URL.
        header: dict of HTTP request headers (e.g. a User-Agent).
        proxies: optional requests-style proxies dict, or None for a direct
            connection.

    Returns:
        list[tuple[str, int]]: the URLs visited in order, each paired with 0.
    """
    trace = []
    visited_urls = set()  # guards against redirect loops

    def fetch_url(current_url, header, proxies):
        # Some trackers emit JSON-escaped URLs ("https:\/\/...") — normalize first.
        current_url = current_url.replace('\\/', '/')
        if current_url in visited_urls:
            return
        visited_urls.add(current_url)

        try:
            trace.append((current_url, 0))
            # requests treats proxies=None the same as omitting the argument.
            response = requests.get(
                current_url,
                headers=header,
                proxies=proxies,
                allow_redirects=False,
                timeout=8,
            )
            # 1) Standard HTTP 3xx redirect via the Location header.
            #    Location may legally be relative, so resolve it against the
            #    current URL before following it.
            if response.is_redirect or response.is_permanent_redirect:
                redirect_url = response.headers.get('Location')
                if redirect_url:
                    fetch_url(requests.compat.urljoin(current_url, redirect_url), header, proxies)
            if response.status_code:
                soup = BeautifulSoup(response.text, 'html.parser')
                # 2) First <a> link in the body (manual "continue" redirect).
                link_tag = soup.find('a')
                if link_tag and 'href' in link_tag.attrs:
                    redirect_url = requests.compat.urljoin(current_url, link_tag['href'])
                    fetch_url(redirect_url, header, proxies)
                # 3) <meta http-equiv="refresh" content="0; url=..."> redirect.
                meta_refresh = soup.find('meta', attrs={'http-equiv': 'refresh'})
                if meta_refresh:
                    match = re.search(r'url=([^;]+)', meta_refresh.get('content', ''))
                    if match:
                        # The url= value may be quoted and/or percent-encoded.
                        redirect_url = requests.utils.unquote(match.group(1).strip().strip('\'"'))
                        fetch_url(requests.compat.urljoin(current_url, redirect_url), header, proxies)
                # 4) JavaScript window.location.replace('...') redirect.
                for script in soup.find_all('script'):
                    match = re.search(
                        r"window\.location\.replace\(['\"](.*?)['\"]\)",
                        script.string if script.string else "",
                    )
                    if match:
                        redirect_url = requests.compat.urljoin(current_url, match.group(1).strip())
                        fetch_url(redirect_url, header, proxies)
        except requests.RequestException:
            pass  # best-effort: a failed hop simply ends the trace

    fetch_url(url, header, proxies)
    return trace


# async def trace_redirect_with_browser(url, ipport=None, ua=None, headless=True):
#     all_urls = []

#     async with sync_playwright() as p:
#         proxy_config = {}
#         if ipport:
#             proxy_config = {
#                 "server": f"socks5://{ipport.get('ip')}:{ipport.get('port')}"
#             }

#         browser = await p.chromium.launch(headless=headless)
#         context = await browser.new_context(
#             **({"proxy": proxy_config} if proxy_config else {}),
#             user_agent = ua
#         )
#         page = await context.new_page()

#         def on_request(request):
#             if request.is_navigation_request():
#                 all_urls.append(request.url)

#         page.on("request", on_request)
#         # ip, country = fetch_ip_country(context)
#         pass #print(f"🌐 正在打开页面: {url}")
#         # pass #print(f"🌐 当前代理IP: {ip or 'N/A'} | 国家: {country or 'N/A'}")
#         try:
#             page.goto(url, timeout=30000, wait_until="domcontentloaded")
#         except Exception as e:
#             pass #print(f"⚠️ 页面跳转异常: {e}")

#         # 观察跳转变化
#         for _ in range(10):
#             pass #print(f"⏳ 当前URL: {page.url}")
#             time.sleep(1)

#         if not all_urls or all_urls[-1] != page.url:
#             all_urls.append(page.url)

#         browser.close()

#     # pass #print("\n🔁 跳转链：")
#     # for i, u in enumerate(all_urls, 1):
#     #     pass #print(f"[{i}] {u}")

#     return [(url, -1) for url in all_urls]


# async def trace_redirect_with_browser(url, ipport=None, ua=None, headless=True, referrer=None):
#     all_urls = []

#     async with async_playwright() as p:
#         proxy_config = {}
#         if ipport:
#             proxy_config = {
#                 "server": f"socks5://{ipport.get('ip')}:{ipport.get('port')}"
#             }

#         browser = await p.chromium.launch(headless=headless)
        
#         # 创建浏览器上下文时添加额外参数
#         context_options = {}
#         if proxy_config:
#             context_options["proxy"] = proxy_config
#         if ua:
#             context_options["user_agent"] = ua
        
#         context = await browser.new_context(**context_options)
#         page = await context.new_page()

#         page.on("request", lambda request: all_urls.append(request.url) if request.is_navigation_request() else None)

#         pass #print(f"🌐 正在打开页面: {url}")
#         try:
#             # 如果提供了referrer，使用特殊的方式设置referrer
#             if referrer:
#                 # 先导航到referrer页面
#                 await page.goto(referrer, timeout=5000, wait_until="domcontentloaded")
#                 # 然后从referrer页面导航到目标URL
#                 await page.goto(url, timeout=30000, wait_until="domcontentloaded")
#             else:
#                 # 直接导航到目标URL
#                 await page.goto(url, timeout=30000, wait_until="domcontentloaded")
#         except Exception as e:
#             pass #print(f"⚠️ 页面跳转异常: {e}")

#         for _ in range(10):
#             pass #print(f"⏳ 当前URL: {page.url}")
#             await asyncio.sleep(1)

#         if not all_urls or all_urls[-1] != page.url:
#             all_urls.append(page.url)

#         await browser.close()

#     # pass #print("\n🔁 跳转链：")
#     # for i, u in enumerate(all_urls, 1):
#     #     pass #print(f"[{i}] {u}")

#     return [(url, -1) for url in all_urls]


async def trace_redirect_with_browser(url, ipport=None, ua=None, headless=True, referrer=None, check_ip_leak=False):
    """
    Trace a URL's redirect chain with a real (Playwright Chromium) browser.

    Args:
        url: URL to trace.
        ipport: proxy config dict, e.g. {'ip': '1.2.3.4', 'port': 1080}.
        ua: User-Agent string; a desktop Chrome UA is used when None.
        headless: whether to run the browser headless.
        referrer: source URL; sent as the Referer header (the direct
            goto(referrer) navigation is currently commented out below).
        check_ip_leak: whether to compare the browser's egress IP against this
            machine's real IP (requires the linkutil.check_ip module).

    Returns:
        list: the traced chain as [(url, status_code), ...]; status_code is
        always -1 because this browser path does not expose HTTP statuses.
    """
    all_urls = []
    ip_leak_results = None
    
    try:
        async with async_playwright() as p:
            # Fetch this machine's real IP first (only when leak detection is on).
            real_ip = None
            if check_ip_leak:
                try:
                    from linkutil.check_ip import get_real_ip
                    real_ip, _ = get_real_ip()
                    logger.info(f"真实IP: {real_ip}")
                except ImportError:
                    logger.warning("未找到check_ip模块，跳过IP泄露检测")
                    check_ip_leak = False
            
            # Build the proxy configuration.
            proxy_config = None
            if ipport:
                # Candidate proxy URL formats, tried in order on launch failure.
                proxy_formats = [
                    f"socks5://{ipport.get('ip')}:{ipport.get('port')}",  # plain SOCKS5
                    f"socks5h://{ipport.get('ip')}:{ipport.get('port')}",  # SOCKS5 with remote DNS
                    f"http://{ipport.get('ip')}:{ipport.get('port')}"      # HTTP proxy
                ]
                
                # Start with the first format; fallbacks are attempted below.
                proxy_config = {
                    "server": proxy_formats[0],
                    "timeout": 30000
                }
                logger.info(f"使用代理配置: {proxy_config}")
            
            # Browser launch options.
            browser_options = {
                "headless": headless,
                "timeout": 60000,
            }
            
            # Attach the proxy, if any.
            if proxy_config:
                browser_options["proxy"] = proxy_config
            
            # Launch the browser, falling back through proxy formats on failure.
            browser = None
            proxy_error = False
            
            try:
                logger.info(f"启动浏览器，代理: {'已配置' if proxy_config else '未配置'}")
                browser = await p.chromium.launch(**browser_options)
            except Exception as e:
                proxy_error = True
                logger.error(f"使用代理启动浏览器失败: {str(e)}")
                
                if "ERR_NO_SUPPORTED_PROXIES" in str(e) and proxy_config:
                    logger.warning("尝试切换代理格式...")
                    
                    # Try the remaining proxy formats.
                    for i, proxy_format in enumerate(proxy_formats[1:], 1):
                        try:
                            browser_options["proxy"] = {"server": proxy_format}
                            logger.info(f"尝试代理格式 {i+1}: {proxy_format}")
                            browser = await p.chromium.launch(**browser_options)
                            proxy_error = False
                            break
                        except Exception as e2:
                            logger.warning(f"代理格式 {i+1} 也失败: {str(e2)}")
                
                # If every proxy format failed, retry with no proxy at all.
                if proxy_error:
                    logger.warning("所有代理格式都失败，尝试不使用代理...")
                    browser_options.pop("proxy", None)
                    try:
                        browser = await p.chromium.launch(**browser_options)
                        logger.info("已成功不使用代理启动浏览器")
                        proxy_error = False
                    except Exception as e3:
                        logger.error(f"不使用代理也失败: {str(e3)}")
                        return [(url, -1)]  # give up: return only the original URL
            
            if not browser:
                logger.error("无法启动浏览器")
                return [(url, -1)]
                
            # Browser-context options: UA, headers, and fingerprint hardening.
            context_options = {
                "user_agent": ua or (
                    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                    "AppleWebKit/537.36 (KHTML, like Gecko) "
                    "Chrome/119.0.0.0 Safari/537.36"
                ),
                "bypass_csp": True,
                "locale": "en-US",
                "timezone_id": "UTC",  # pin the timezone for consistency
                "viewport": {"width": 1280, "height": 800},
                "ignore_https_errors": True,  # tolerate bad/self-signed certificates
                "has_touch": False,  # present as a non-touch desktop
                "permissions": [],  # grant no browser permissions
                "extra_http_headers": {
                    "Accept-Language": "en-US,en;q=0.9",
                    "DNT": "1",  # Do Not Track
                    **({"Referer": referrer} if referrer else {"Referer": url})
                }
            }
            
            try:
                context = await browser.new_context(**context_options)
            except Exception as e:
                logger.error(f"创建浏览器上下文失败: {str(e)}")
                await browser.close()
                return [(url, -1)]
                
            # Anti-detection script injected into every page before it loads
            # (WebRTC/canvas/webdriver/hardware fingerprint countermeasures).
            await context.add_init_script("""
                // 阻止WebRTC泄露真实IP
                Object.defineProperty(navigator, 'mediaDevices', { value: undefined });
                
                // 阻止Canvas指纹识别
                const originalGetContext = HTMLCanvasElement.prototype.getContext;
                HTMLCanvasElement.prototype.getContext = function() {
                    const context = originalGetContext.apply(this, arguments);
                    if (context && arguments[0] === '2d') {
                        const originalGetImageData = context.getImageData;
                        context.getImageData = function() {
                            const imageData = originalGetImageData.apply(this, arguments);
                            // 添加微小随机干扰
                            const pixels = imageData.data;
                            for (let i = 0; i < pixels.length; i += 4) {
                                pixels[i] = pixels[i] + Math.floor(Math.random() * 2);
                                pixels[i+1] = pixels[i+1] + Math.floor(Math.random() * 2);
                                pixels[i+2] = pixels[i+2] + Math.floor(Math.random() * 2);
                            }
                            return imageData;
                        };
                    }
                    return context;
                };
                
                // 硬件信息
                Object.defineProperty(navigator, 'hardwareConcurrency', { 
                    get: () => 4 
                });
                Object.defineProperty(navigator, 'deviceMemory', { 
                    get: () => 8 
                });
                Object.defineProperty(navigator, 'languages', { 
                    get: () => ['en-US', 'en'] 
                });
                
                // 阻止检测Playwright/Automation
                Object.defineProperty(navigator, 'webdriver', { 
                    get: () => false 
                });
                
                // 时区保持一致
                Object.defineProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', {
                    value: () => ({ timeZone: 'UTC' })
                });
            """)
            
            # Verify the proxy actually masks the real IP (optional).
            if check_ip_leak and real_ip:
                try:
                    from linkutil.check_ip import check_for_ip_leaks
                    test_page = await context.new_page()
                    await test_page.goto("https://ipinfo.io/json", wait_until="domcontentloaded")
                    
                    try:
                        ip_content = await test_page.inner_text("pre")
                        ip_data = json.loads(ip_content)
                        proxy_ip = ip_data.get("ip")
                        
                        if proxy_ip == real_ip:
                            logger.warning(f"⚠️ 检测到IP泄露! 代理IP ({proxy_ip}) 与真实IP ({real_ip}) 相同")
                            if not proxy_error:  # only counts as a leak when a proxy is in use
                                ip_leak_results = {
                                    "is_leaked": True,
                                    "real_ip": real_ip,
                                    "proxy_ip": proxy_ip
                                }
                        else:
                            logger.info(f"✅ IP检测正常: 代理IP = {proxy_ip}, 真实IP = {real_ip}")
                            ip_leak_results = {
                                "is_leaked": False,
                                "real_ip": real_ip,
                                "proxy_ip": proxy_ip
                            }
                    except Exception as e:
                        logger.error(f"解析IP信息失败: {str(e)}")
                    
                    await test_page.close()
                except ImportError:
                    logger.warning("未能导入IP检测模块")
                except Exception as e:
                    logger.error(f"IP泄露检测失败: {str(e)}")
            
            # Open the target page and record every navigation request.
            try:
                page = await context.new_page()
                
                # Collect navigation URLs — this builds the redirect chain.
                page.on("request", lambda request: 
                    all_urls.append(request.url) if request.is_navigation_request() else None
                )
                
                # Surface in-page JavaScript errors in the log.
                page.on("pageerror", lambda err: 
                    logger.warning(f"页面JavaScript错误: {err}")
                )
                
                # Allow slow pages a generous navigation timeout.
                page.set_default_navigation_timeout(60000)
                
                # Navigate to the target URL.
                try:
                    if referrer:
                        # logger.info(f"首先访问referrer页面: {referrer}")
                        # await page.goto(referrer, timeout=10000, wait_until="domcontentloaded")
                        # await page.wait_for_timeout(1000)  # 等待1秒
                        
                        logger.info(f"然后访问目标URL: {url}")
                        await page.goto(url, timeout=30000, wait_until="domcontentloaded")
                    else:
                        logger.info(f"直接访问URL: {url}")
                        await page.goto(url, timeout=30000, wait_until="domcontentloaded")
                except Exception as e:
                    logger.warning(f"页面加载异常: {str(e)}")
                    # Even on failure, record wherever we ended up.
                    if page.url and page.url != "about:blank":
                        all_urls.append(page.url)
                
                # Give JS-driven redirects time to fire.
                for i in range(15):  # wait up to 15 seconds
                    current_url = page.url
                    logger.debug(f"[{i+1}/15] 当前URL: {current_url}")
                    await asyncio.sleep(1)
                    
                    # NOTE(review): this early-exit check is a no-op (`pass`) —
                    # the loop always waits the full 15 seconds. Confirm whether
                    # an early break on a stable URL was intended.
                    if i > 0 and page.url == current_url:
                        pass  # keep waiting
                    
                # Make sure the final URL is recorded.
                if not all_urls or all_urls[-1] != page.url:
                    all_urls.append(page.url)
                
                logger.info(f"跟踪完成，共记录 {len(all_urls)} 个URL")
                
            except Exception as page_error:
                logger.error(f"页面操作失败: {str(page_error)}")
            finally:
                await browser.close()
                
    except Exception as main_error:
        logger.error(f"trace_redirect_with_browser总体错误: {str(main_error)}")
        return [(url, -1)]
    
    # Guarantee a non-empty result (at minimum the original URL).
    if not all_urls:
        all_urls.append(url)
    
    # Surface a leak warning in the logs (the results dict is not returned).
    if ip_leak_results and ip_leak_results.get("is_leaked", False):
        logger.warning("⚠️ IP泄露警告: 建议更换代理或调整配置")
    
    return [(u, -1) for u in all_urls]



async def get_final_url(num, OfferLink, TargetNation, redirect, URLldentifiers, log_des, referrer=None):
    """
    Resolve one offer link through a proxy-backed browser trace and validate it.

    Args:
        num: attempt counter; num % 3 selects which proxy source to use.
        OfferLink: the tracking/offer URL to resolve.
        TargetNation: target country code used to pick a proxy.
        redirect: expected redirect value, forwarded to check_url.
        URLldentifiers: identifier string the final URL must satisfy.
        log_des: dict owned by the caller; diagnostic info is written into it
            under "proxyInfo", "urls", "final_url" and "flag".
        referrer: optional referrer URL for the browser navigation.

    Returns:
        str | None: the validated final URL, or None when validation fails or
        any step raises (errors are logged, not propagated).
    """
    try:
        data = {
            'OfferLink': OfferLink,
            'TargetNation': TargetNation,
            'redirect': redirect,
            'URLldentifiers': URLldentifiers,
        }

        # Pick a proxy source by attempt number: proxy6 for num%3 in {0, 1},
        # proxy4 for num%3 == 2.
        proxy_name = "proxy6"
        if num % 3 == 2:
            proxy_name = "proxy4"
            ipport = get_proxy4(TargetNation)
        else:
            ipport = get_proxy6(TargetNation)
        logger.debug(f"ipport : {ipport}")
        ua = get_ua()
        proxies = {
            "http": f"socks5://{ipport.get('ip')}:{ipport.get('port')}",
            "https": f"socks5://{ipport.get('ip')}:{ipport.get('port')}",
        }

        # Sanity-check the proxy and record its exit-IP details.
        testUrl = 'https://ipinfo.io'
        res = requests.get(testUrl, proxies=proxies, timeout=5)
        log_des["proxyInfo"] = {
            "proxy_name": proxy_name,
            "UA": ua,
            "ipport": ipport,
            "IPinfo": res.text
        }

        # Trace the redirect chain in a real browser through the same proxy.
        urls = await trace_redirect_with_browser(data.get('OfferLink'), ipport=ipport, ua=ua, referrer=referrer)
        log_des["urls"] = urls

        # Validate the trace against the expected redirect/identifiers.
        flag, final_url = check_url(urls, data.get("redirect"), data.get('URLldentifiers'))
        log_des["final_url"] = final_url
        log_des["flag"] = flag

        # Only return a URL that passed validation.
        if flag:
            return final_url
        else:
            return None
    except Exception as e:
        logger.debug(f"get_final_url | get_proxy : {e}")

async def get_link(OfferLink, TargetNation, redirect, URLldentifiers, log_des, referrer=None):
    """
    Resolve an offer link, retrying up to 6 times with rotating proxies.

    Args:
        OfferLink: the tracking/offer URL to resolve.
        TargetNation: target country code used to pick proxies.
        redirect: expected redirect value, forwarded to check_url.
        URLldentifiers: identifier string the final URL must satisfy.
        log_des: dict owned by the caller; input parameters are recorded under
            "linkInfo" and per-attempt details are added by get_final_url.
        referrer: optional referrer URL for the browser navigation.

    Returns:
        str | None: the first successfully validated URL, or None when all
        6 attempts fail.
    """
    log_des["linkInfo"] = {
            "OfferLink": OfferLink,
            "TargetNation": TargetNation,
            "redirect": redirect,
            "URLldentifiers": URLldentifiers,
        }
    for attempt in range(6):
        try:
            # The attempt index selects the proxy source inside get_final_url.
            final_url = await get_final_url(attempt, OfferLink, TargetNation, redirect, URLldentifiers, log_des, referrer)
            if final_url:
                # Success: stop retrying and return the validated URL.
                return final_url
        except Exception as e:
            # Fix: the original logger.debug("get_link: ", e) passed `e` as a
            # %-format argument with no placeholder, producing a logging error
            # instead of the message.
            logger.debug("get_link: %s", e)

    return None

# OfferLink ="https://link.smilekols.com/track.php?ref=1069338&aid=1004395&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fwww.dickies.com" # "https://c.duomai.com/track.php?aid=36278&dm_fid=16079&euid=%7B%7BDATETIME%7D%7D&ref=1246487&t=https%3A%2F%2Fwww.ethika.com%2F"# "https://link.smilekols.com/track.php?ref=1069338&aid=1004395&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fwww.dickies.com" #  "https://c.duomai.com/track.php?aid=36278&dm_fid=16079&euid=%7B%7BDATETIME%7D%7D&ref=1246487&t=https%3A%2F%2Fwww.ethika.com%2F" # "https://stockx.pvxt.net/c/4791133/1810699/9060?sharedid=LKB22020&subId3=332168597&u=https://stockx.com"#  "https://click.linkbest.com/t.php?k=CR3UCR3USRNlEVFRVQEJ0NlI0Nl0DZp9lY1NnJwYDNyEDMx0DZpBnJ2UTM1ITPklWbmYyN4YTO1QTMzIzM981XsB3Yf9lJt92Yug3aj9GdzZkMlYkMlE0MlMHc0RHa9wmc1Z"
# Tar = "US"
# red = 0
# URLI ="https://www.dickies.com/,irclickid,sharedid,utm_campaign,utm_source,utm_medium,utm_content,utm_term,irgwc" # "https://www.ethika.com/,clickref,utm_medium,utm_source,utm_source_platform,utm_campaign,utm_term,utm_content" # "https://www.dickies.com/,irclickid,sharedid,utm_campaign,utm_source,utm_medium,utm_content,utm_term,irgwc" # "https://www.ethika.com/,clickref,utm_medium,utm_source,utm_source_platform,utm_campaign,utm_term,utm_content"

# dd = [{"OfferLink": "https://click.linkbest.com/t.php?k=CR3UCR3USRNlEVFRVQEJ0NlI0Nl0DZp9lY1NnJwYDNyEDMx0DZpBnJ2UTM1ITPklWbmYyN4YTO1QTMzIzM981XsB3Yf9lJt92Yug3aj9GdzZkMlYkMlE0MlMHc0RHa9wmc1Z",
# "Tar": "0",
# "red": "US",
# "URLI": "https://stockx.com,utm_source,utm_medium,utm_campaign"},
# {"OfferLink": "https://link.smilekols.com/track.php?ref=1069338&aid=1004395&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fwww.dickies.com",
# "Tar": "0",
# "red": "US",
# "URLI": "https://www.dickies.com/,irclickid,sharedid,utm_campaign,utm_source,utm_medium,utm_content,utm_term,irgwc"},
# {"OfferLink": "https://kn.kreatornow.com/kn/track.php?ref=1069327&aid=1007026&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fwww.shoepalace.com%2F",
# "Tar": "0",
# "red": "US",
# "URLI": "https://www.shoepalace.com/,ranMID,ranEAID,ranSiteID"},
# {"OfferLink": "https://trac.fanstoshop.com/track.php?ref=1072362&aid=1004977&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fpuffy.com",
# "Tar": "0",
# "red": "US",
# "URLI": "https://puffy.com/.irclickid.utm_source.utm_medium.clickid.sharedid.iradid.irgwc"},
# {"OfferLink": "https://link.smilekols.com/track.php?ref=1069341&aid=1007732&euid=%7B%7BDATETIME%7D%7D&t=https%3A%2F%2Fwww.lego.com",
# "Tar": "3",
# "red": "GB",
# "URLI": "https://www.lego.com/rakuten,ranMID,ranEAID,ranSiteID,url"},
# {"OfferLink": "https://www.linkhaitao.com/index.php?mod=lhdeal&track=69c26tkGRpBwKWkf3JzdQM9AoY9Cr139382xHvQrS64hb_bLV61j1EXxWm0D5PiSBks4_c&new=https%3A%2F%2Fwww.lindt.co.uk%2F",
# "Tar": "0",
# "red": "GB",
# "URLI": "https://www.lindt.co.uk/,utm_source,utm_medium,utm_campaign,utm_AdConten,utm_medium,utm_campaign,utm_AdContent,sv1,sv_campaign_id,awc"},
# {"OfferLink": "https://app.partnerboost.com/track/3166BphAw7Qrj_a0DeEjHiqsXDHKFGrXq_aaiEWRADLYw6SR8ufORtye6KwKbWWB6XHz5Xl6IRFKFr2xa7Rsi6",
# "Tar": "0",
# "red": "US",
# "URLI": "https://shopmicas.com/,pbtid,utm_source,utm_medium,utm_campaign,utm_content"},
# {"OfferLink": "https://www.linkbux.com/track/273ap7bIutQSHMAMwxUWdrPkAYKyD90UgOe1MxTHnV92Bw9bg_bPMiZzRF0CUHs2dGDd6sQ_c_c?url=https%3A%2F%2Fwww.samsung.com%2Fus%2F",
# "Tar": "0",
# "red": "US",
# "URLI": "https://www.samsung.com/us/,CID,utm_source,utm_medium,utm_campaign,utm_content,rktevent,ranMID,ranEAID,ranSiteID"},
# {"OfferLink": "https://c.duomai.com/track.php?aid=36278&dm_fid=16079&euid=%7B%7BDATETIME%7D%7D&ref=1246487&t=https%3A%2F%2Fwww.ethika.com%2F",
# "Tar": "0",
# "red": "US",
# "URLI": "https://www.ethika.com/,clickref,utm_medium,utm_source,utm_source_platform,utm_campaign,utm_term,utm_content"}]
# res_ = []
# for d in dd:
#     OfferLink = d["OfferLink"]
#     Tar = d["red"]
#     red = int(d["Tar"])
#     URLI = d["URLI"]

#     log_des = []
#     get_link(OfferLink, Tar, red, URLI, log_des)
#     res_.append(log_des)
# pass #print(res_)
# (removed) stray no-op literal `6.38` left over from debugging
# res = requests.get("https://stockx.pvxt.net/c/4791133/1810699/9060?sharedid=LKB22020&subId3=332168597&u=https://stockx.com")
# pass #print(res.text)


# Sample parameters for manual/ad-hoc runs (see the commented-out
# my_get_link invocation at the bottom of the file).
OfferLink = "https://pb.eclicklink.com/5ZS8GM/7XDN2/"# "https://link.smilekols.com/log?clickid=%7B%7BDATETIME%7D%7D&cmpgn=1008504&pub=1078564&path=https%3A%2F%2Fwww.primevideo.com"
Tar = "US"
red = 0
URLI =""


async def my_get_link(OfferLink, TargetNation, redirect, URLldentifiers, referrer=None):
    """
    Convenience wrapper around get_link that owns the diagnostics dict.

    Returns:
        tuple: (final_url_or_None, log_dict_with_trace_details)
    """
    trace_log = {}
    resolved = await get_link(OfferLink, TargetNation, redirect, URLldentifiers, trace_log, referrer)
    return resolved, trace_log

# print(my_get_link(OfferLink,Tar,red,URLI))