from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import requests
from fake_useragent import UserAgent
import uvicorn
import re
from requests.exceptions import SSLError, ProxyError
from urllib.parse import urljoin, urlparse, urlunparse
from typing import Optional
import redis
import tldextract
from urllib.parse import urlparse, parse_qs
import logging
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
import os
import json

# --- Logging configuration ---
log_file = 'app.log'
backup_dir = 'logs_backup'
if not os.path.exists(backup_dir):
    os.makedirs(backup_dir)

max_log_size = 10 * 1024 * 1024  # 10MB
backup_count = 5  # keep 5 rotated backup files

# Size-based rotation handler.
# NOTE(review): this handler is created (and opens the log file) but is never
# attached to the logger — see the commented-out addHandler below; only the
# timed handler is active. Confirm whether it can be removed.
handler = RotatingFileHandler(
    os.path.join(backup_dir, log_file),
    maxBytes=max_log_size,
    backupCount=backup_count
)

# Time-based rotation handler — the one actually attached below.
timed_handler = TimedRotatingFileHandler(
    os.path.join(backup_dir, log_file),
    when='midnight',  # rotate the log at midnight
    interval=1,  # every 1 day
    backupCount=7  # keep 7 days of logs
)

formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
timed_handler.setFormatter(formatter)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# logger.addHandler(handler)
logger.addHandler(timed_handler)

logger.info("日志系统已初始化")

app = FastAPI()

# Redis client; /redirect_trace/ reads the 'special_domain' set from db 0 to
# decide which domains are routed through the Bright Data proxy.
redis_client = redis.Redis(host='localhost', port=6379, db=0)


class RequestData(BaseModel):
    """Request body for the /redirect_trace/ endpoint."""

    last_url: str  # starting URL whose redirect chain will be traced
    region: str  # proxy exit-node country/region code (e.g. 'us')
    proxy_session_id: str  # sticky-session id passed to the proxy provider
    # UA supplied by the caller; note the handler generates its own random
    # desktop UA and does not read this field.
    user_agent: str
    # Which proxy provider to use: 'abc' (default) or 'bright_data' for
    # special domains. Optional; defaults to 'abc'.
    use_agency: Optional[str] = 'abc'


def get_base_domain(url):
    """Return only the scheme and host of *url*, e.g. 'https://example.com'.

    Path, params, query and fragment are all stripped.
    """
    parsed = urlparse(url)
    # Rebuild the URL keeping scheme + netloc and blanking everything else.
    return urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))


@app.post("/redirect_trace/")
def get_redirect_trace(data: RequestData):
    """Trace a URL's full redirect chain through a residential proxy.

    Follows HTTP 3xx Location headers, JavaScript-based jumps and
    <meta refresh> redirects until no further redirect is found.

    Returns a dict with:
        trace: list of (url, status_code) pairs in visit order
        ip: the proxy's exit IP (best-effort; '' on lookup failure)
        ua: the random desktop User-Agent used for all requests
        session_id: echo of data.proxy_session_id
    """

    # Domains that must be fetched through the Bright Data proxy (Redis set
    # of quoted strings; strip the surrounding quotes while decoding).
    root_domains = redis_client.smembers('special_domain')
    root_domains = {domain.decode('utf-8').replace('"', '') for domain in root_domains}

    proxies = {}  # proxy mapping used for the exit-IP lookup below
    proxies_of_bright = {}  # Bright Data residential proxy (special domains)
    proxies_of_abc = {}  # ABC residential proxy (default)

    if data.use_agency == 'bright_data':
        # Bright Data residential proxy.
        # NOTE(review): credentials are hard-coded — consider moving them to
        # configuration/secrets.
        proxies_of_bright = {
            'http': 'http://brd-customer-hl_d524ed17-zone-residential_proxy1-country-' + data.region + '-session-' +
                    data.proxy_session_id + ':pvf69g7pwm7g@brd.superproxy.io:33335',
            'https': 'http://brd-customer-hl_d524ed17-zone-residential_proxy1-country-' + data.region + '-session-' +
                     data.proxy_session_id + ':pvf69g7pwm7g@brd.superproxy.io:33335', }
        proxies = proxies_of_bright
        logger.info(
            f"{data.proxy_session_id}===========================brightdata 打印一下使用的的代理平台: {data.use_agency}")
    else:
        # ABC residential proxy (default; credentials also hard-coded).
        proxies_of_abc = {
            'http': 'socks5h://1o8bJROJq0-zone-star-region-' + data.region + '-session-' +
                    data.proxy_session_id + '-SessTime-15:27684262@f08941206884eefe.abcproxy.vip:4950',
            'https': 'socks5h://1o8bJROJq0-zone-star-region-' + data.region + '-session-' +
                     data.proxy_session_id + '-SessTime-15:27684262@f08941206884eefe.abcproxy.vip:4950',
        }
        proxies = proxies_of_abc
        logger.info(
            f"{data.proxy_session_id}===========================abc 打印一下使用的的代理平台: {data.use_agency}")

    url = data.last_url

    # A random *desktop* User-Agent is generated server-side; the caller's
    # data.user_agent is not used here.
    random_ua = get_random_desktop_ua()
    headers = {
        'User-Agent': random_ua
    }
    realIpPort = ''

    # Best-effort lookup of the proxy's exit IP; failures are only logged and
    # the trace continues with an empty IP.
    if data.use_agency == 'bright_data':
        try:
            ip_res = requests.get("http://ipinfo.io", proxies=proxies)
            ip_res = json.loads(ip_res.text)
            realIpPort = ip_res["ip"]
            logger.info(f"{data.proxy_session_id}代理IP: {realIpPort}")

        except Exception as e:
            logger.error(f"发生错误 {data.proxy_session_id}: {e}")
    else:
        try:

            realIpPort = requests.get('https://www.adsxx.com/api/free/ip/info',
                                      headers=headers,
                                      proxies=proxies,
                                      timeout=8, verify=False)
            realIpPort = realIpPort.text
            logger.info(f"{data.proxy_session_id}代理IP: {realIpPort}")
        except SSLError as ssl_err:
            logger.error(f"SSL错误 {data.proxy_session_id}: {ssl_err}")
        except ProxyError as proxy_err:
            logger.error(f"代理错误 {data.proxy_session_id}: {proxy_err}")
        except Exception as e:
            logger.error(f"其他错误 {data.proxy_session_id}: {e}")

    trace = []  # (url, status_code) pairs in visit order
    visited_urls = set()  # guards against redirect loops

    def fetch_url(current_url):
        # Fetch one hop of the chain and recurse into whatever redirect it
        # yields (HTTP Location, JS jump, or meta refresh).
        if current_url in visited_urls:
            return
        visited_urls.add(current_url)

        # Status 0 is a placeholder until a response arrives.
        trace.append((current_url, 0))

        # Registrable domain of the current hop (e.g. 'example.co.uk').
        extracted = tldextract.extract(current_url)
        domain = f"{extracted.domain}.{extracted.suffix}"  # root domain
        # Special domains go through the Bright Data proxy instead of ABC.
        is_subdomain = domain in root_domains
        logger.info(f'===============子域名:{domain} 是否在特别域名列表中: {is_subdomain}')

        try:
            # Send the current URL as Referer on the request.
            headers_with_referer = headers.copy()
            headers_with_referer['Referer'] = current_url
            response = requests.get(current_url, headers=headers_with_referer,
                                    proxies=proxies_of_bright if is_subdomain else proxies_of_abc,
                                    allow_redirects=False, timeout=13,
                                    verify=False)
            if response.status_code == 502 and is_subdomain:
                # 502 on a special domain: retry once through the ABC proxy.
                response = requests.get(current_url, headers=headers_with_referer,
                                        proxies=proxies_of_abc,
                                        allow_redirects=False, timeout=14,
                                        verify=False)

            trace[-1] = (current_url, response.status_code)
            logger.info(f"{data.proxy_session_id}访问: {current_url}, 状态码: {response.status_code}")

            if response.status_code == 200:
                # First try the generic JS-redirect extraction.
                redirectUrl = scratch_js_jump(current_url, response.text)
                # Fall back to the two special-page extractors when the
                # generic scan found nothing.
                if not redirectUrl:
                    if 'const url = new URL(location.href);' in response.text:
                        redirectUrl = special_url_a(current_url)
                    elif 'url=" + params.l' in response.text:
                        redirectUrl = special_url_b(response.text, current_url)


                # Normalize the recorded URL: when it is just
                # "scheme://host?query" (no path), insert a '/' before the
                # query string.
                current_base_domain = get_base_domain(current_url)
                if current_url.startswith(current_base_domain):
                    # Split off the query string, if any.
                    base_part, query_part = current_url.split('?', 1) if '?' in current_url else (current_url, '')

                    # Is there a path component after the base domain?
                    has_path = len(base_part) > len(current_base_domain) and base_part[
                                                                             len(current_base_domain):].startswith('/')

                    has_trailing_slash = base_part.endswith('/')

                    # No path, has a query, and no slash yet -> add the slash.
                    if not has_path and query_part and not has_trailing_slash:
                        base_part += '/'

                    # Only rebuild the URL when there was no path.
                    if not has_path:
                        current_url = base_part + ('?' + query_part if query_part else '')

                    trace[-1] = (current_url, 200)  # record the normalized URL

                if redirectUrl:
                    fetch_url(redirectUrl)
            # HTTP 3xx: follow the Location header.
            elif response.is_redirect or response.is_permanent_redirect:
                redirect_url = response.headers.get('Location')
                if redirect_url:
                    # Relative Location: resolve against the current base URL.
                    if not redirect_url.startswith(('http://', 'https://')):
                        base_domain = get_base_domain(current_url)
                        redirect_url = urljoin(base_domain, redirect_url)

                    # Force https for the next hop.
                    if redirect_url.startswith('http://'):
                        redirect_url = redirect_url.replace('http://', 'https://', 1)
                    logger.info(f"{data.proxy_session_id}重定向到: {redirect_url}")
                    fetch_url(redirect_url)
            elif response.status_code == 403:
                # Same slash normalization as in the 200 branch above.
                current_base_domain = get_base_domain(current_url)
                if current_url.startswith(current_base_domain):
                    base_part, query_part = current_url.split('?', 1) if '?' in current_url else (current_url, '')

                    has_path = len(base_part) > len(current_base_domain) and base_part[
                                                                             len(current_base_domain):].startswith('/')

                    has_trailing_slash = base_part.endswith('/')

                    if not has_path and query_part and not has_trailing_slash:
                        base_part += '/'

                    if not has_path:
                        current_url = base_part + ('?' + query_part if query_part else '')

                    # NOTE(review): records status 200 even though the
                    # response was 403 — looks copied from the 200 branch;
                    # confirm this is intentional.
                    trace[-1] = (current_url, 200)

        except requests.exceptions.HTTPError as http_err:
            logger.info(f"{data.proxy_session_id}HTTP错误: {http_err}")
        except Exception as e:
            logger.info(f"{data.proxy_session_id}其他错误: {e}")

    fetch_url(url)

    # Assemble the response payload.
    result = {
        "trace": trace,
        "ip": realIpPort if realIpPort else '',
        "ua": random_ua,
        'session_id': data.proxy_session_id
    }
    logger.info(f"============打印一下返回结果: {result}")
    return result


# Un-escape a URL only when it actually contains escape characters.
def process_redirect_url(url):
    """Strip JSON/JS-style escaping from *url*.

    URLs scraped out of inline scripts often carry escaped slashes
    (a backslash before each '/'); double backslashes are dropped and
    escaped slashes become plain '/'. URLs containing no backslash are
    returned unchanged.
    """
    if '\\' not in url:
        # Nothing to un-escape.
        return url
    return url.replace('\\\\', '').replace('\\/', '/')


# Extract a JavaScript-driven redirect target from a page body.
def scratch_js_jump(pre_url, text):
    """Scan *text* (HTML/JS) for a client-side redirect and return the
    target URL, or '' when none is found.

    Strategies, tried in order:
      1. direct string assignment/call, e.g. window.location = '...' or
         window.location.replace('...')
      2. assignment/call through a JS variable, resolving that variable's
         string value elsewhere in the page; when the script concatenates
         '+location.hash', the #fragment of *pre_url* is appended
      3. a <meta http-equiv="refresh"> tag

    *pre_url* is the URL the page was fetched from; only its fragment is
    used (strategy 2).
    """
    # Strategy 1: redirect target appears as a string literal.
    link_pattern = (
        r'(?:window\.location|window\.location\.href|location\.replace|window\.location\.replace)\s*=\s*["\'](.*?)["\']|'
        r'window\.location\.replace\(\s*["\'](.*?)["\']\s*\)'
    )
    link_match = re.search(link_pattern, text)

    if link_match:
        redirect_url = link_match.group(1) or link_match.group(2)
        redirect_url = process_redirect_url(redirect_url)  # un-escape the URL
        logger.info(f"发现直接重定向 URL: {redirect_url}")
        return redirect_url
    else:
        # Strategy 2: find the variable name used in the jump expression.
        var_name_pattern = (
            r'(?:window\.location\.href|location\.replace|window\.location\.replace)\s*\(\s*(\w+)\s*\)|('
            r'?:window\.location\.href|location\.replace|window\.location\.replace)\s*=\s*(\w+)'
        )
        var_name_matches = re.findall(var_name_pattern, text)

        if var_name_matches:
            # Each findall tuple has exactly one non-empty group.
            variable_names = [match[0] or match[1] for match in var_name_matches]
            for var_name in variable_names:
                # Look up the variable's string value elsewhere in the page.
                variable_pattern = rf'(?:var|let|const)\s+{var_name}\s*=\s*["\'](.*?)["\']|{var_name}\s*=\s*["\'](.*?)["\']'
                variable_match = re.search(variable_pattern, text)

                if variable_match:
                    redirect_url = variable_match.group(1) or variable_match.group(2)
                    redirect_url = process_redirect_url(redirect_url)  # un-escape the URL

                    # The script appends the fragment of the current URL.
                    if '+location.hash' in text:
                        hash_value = pre_url.split('#', 1)[1] if '#' in pre_url else ''
                        redirect_url = f"{redirect_url}{hash_value}"

                    logger.info(f"发现重定向变量: {var_name} = {redirect_url}")

                    # Return on the first variable whose value was resolved.
                    return redirect_url
            # for/else: runs only when no candidate variable's value was found.
            else:
                logger.info("没有找到对应变量的值")
                return ''
        else:
            # Strategy 3: <meta http-equiv="refresh"> redirect.
            meta_refresh_pattern = r'<meta\s+http-equiv=["\']refresh["\']\s+content=["\']\d+;\s*url=(.*?)["\']'
            meta_refresh_match = re.search(meta_refresh_pattern, text, re.IGNORECASE)

            if meta_refresh_match:
                redirect_url = meta_refresh_match.group(1)
                redirect_url = process_redirect_url(redirect_url)  # un-escape the URL
                logger.info(f"发现 <meta> 重定向 URL: {redirect_url}")
                return redirect_url

            logger.info("没有发现重定向信息")
            return ''


# Special handler for 200-status pages whose JS jump contains
# "const url = new URL(location.href);": the real destination is carried in
# the page URL's own 'store_url' query parameter.
def special_url_a(url):
    """Return the redirect target embedded in *url*'s 'store_url' parameter.

    Parameters:
        url: the tracking-page URL that was fetched.

    Returns:
        The first 'store_url' query-parameter value (URL-decoded by
        parse_qs), or None when the parameter is absent.
    """
    # Parse the query string of the page's own URL.
    parsed_url = urlparse(url)
    query_params = parse_qs(parsed_url.query)

    # The destination is simply the decoded 'store_url' parameter; the other
    # parameters (source/trip/store_title) and the click-report POST that the
    # page performs are not needed to resolve the redirect.
    final_url = query_params.get("store_url", [None])[0]
    logger.info(f"=====================特殊链接获取到的重定向链接为: : {final_url}")

    return final_url



# Special handler for 200-status pages whose script builds a <meta refresh>
# redirect out of one of the page URL's own query parameters, e.g.:
#     meta.setAttribute("content", "0; url=" + params.l);
def special_url_b(html_content, current_url):
    """Return the redirect target encoded in *current_url*'s query string.

    The page script reads one of its own query parameters (referenced as
    'params.<name>' in the HTML) and uses its value as the redirect
    destination; mirror that here by pulling the same parameter out of
    *current_url*. Returns None when no 'params.<name>' reference exists
    (or the parameter is absent from the URL).
    """
    param_ref = re.search(r'params\.(\w+)', html_content)
    if param_ref is None:
        return None
    # Resolve the referenced parameter against the page's own URL.
    return extract_param_value(current_url, param_ref.group(1))


# Pull a single query parameter's value out of a URL.
def extract_param_value(current_url, param_name):
    """Return the first value of query parameter *param_name* in
    *current_url*, or None when the parameter is absent."""
    query = urlparse(current_url).query
    values = parse_qs(query).get(param_name)
    # parse_qs maps each present key to a non-empty list of values.
    return values[0] if values else None


@app.get("/press/")
def press():
    """Debug endpoint: trace a fixed URL's HTTP redirect chain without any
    proxy, following only Location headers (no JS/meta parsing).

    Returns {"trace": [(url, status_code), ...]}.
    """
    url = 'http://www.linkmos.com/api/free/ip/info'

    trace = []  # (url, status_code) pairs in visit order
    visited_urls = set()  # guards against redirect loops

    def fetch_url(current_url):
        # Fetch one hop and recurse on its Location header, if any.
        if current_url in visited_urls:
            return
        visited_urls.add(current_url)

        try:
            # Status 0 is a placeholder until a response arrives.
            trace.append((current_url, 0))
            response = requests.get(current_url, allow_redirects=False, timeout=8,
                                    verify=False)

            trace[-1] = (current_url, response.status_code)
            logger.info(f"访问: {current_url}, 状态码: {response.status_code}")

            # Follow HTTP 3xx redirects.
            if response.is_redirect or response.is_permanent_redirect:
                redirect_url = response.headers.get('Location')
                if redirect_url:
                    logger.info(f"重定向到: {redirect_url}")
                    fetch_url(redirect_url)

        except requests.exceptions.HTTPError as http_err:
            logger.info(f"HTTP错误: {http_err}")
        except Exception as e:
            logger.info(f"其他错误: {e}")

    fetch_url(url)
    return {"trace": trace}


def get_random_desktop_ua():
    """Return a random desktop User-Agent string.

    Keeps sampling from fake_useragent until a UA containing none of the
    mobile markers ('mobile', 'android', 'iphone', 'ipad') comes up.
    """
    mobile_markers = ('mobile', 'android', 'iphone', 'ipad')
    generator = UserAgent()
    while True:
        candidate = generator.random
        lowered = candidate.lower()
        if all(marker not in lowered for marker in mobile_markers):
            return candidate


if __name__ == "__main__":
    # Development entry point: serve the API on all interfaces, port 8001.
    uvicorn.run(app, host="0.0.0.0", port=8001)
