import time
import requests
from setting import *
from .time_utils import getCurTime
import json
import random

def get_random_delay(min_delay=1, max_delay=3):
    """Return a random delay in seconds, uniformly drawn from [min_delay, max_delay]."""
    delay = random.uniform(min_delay, max_delay)
    return delay

def human_like_delay():
    """Sleep for a random interval drawn from one of four "reading speed" profiles.

    NOTE(review): this definition is shadowed by a later ``human_like_delay``
    defined further down in this module (the one taking ``operation_type``),
    so this version is effectively dead code — confirm and remove.
    """
    delay_types = [
        (1, 3),    # quick skim
        (3, 8),    # normal reading
        (8, 15),   # detailed reading
        (15, 30)   # deep reading
    ]
    min_delay, max_delay = random.choice(delay_types)
    sleep_time = random.uniform(min_delay, max_delay)
    print(f"模拟人类行为，等待 {sleep_time:.2f} 秒")
    time.sleep(sleep_time)

def get_random_headers():
    """Build a browser-like header dict with a randomly chosen desktop User-Agent.

    The Referer defaults to douban's front page; callers may overwrite it.
    """
    user_agents = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
    )

    headers = {
        'User-Agent': random.choice(user_agents),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Cache-Control': 'max-age=0',
        'Referer': 'https://www.douban.com/',
    }
    return headers

def human_like_delay(operation_type="request"):
    """Sleep for a randomized, human-like interval chosen by operation type.

    Args:
        operation_type: One of "request", "retry" or "page_load"; any other
            value falls back to the default 3-5 second range.
    """
    # Per-operation (min, max) delay ranges in seconds.
    delay_ranges = {
        "request": (4, 7),     # between requests
        "retry": (5, 10),      # before a retry
        "page_load": (3, 5),   # while a page loads
    }
    low, high = delay_ranges.get(operation_type, (3, 5))
    delay = get_random_delay(low, high)
    print(f"{getCurTime()} 模拟人工延迟: {delay:.2f}秒")
    time.sleep(delay)

def parse_cookie_string(cookie_str):
    """Parse a browser-style cookie header string into a dict.

    Splits on ';' and strips whitespace, so both "a=1; b=2" and "a=1;b=2"
    are handled (the previous split on '; ' silently dropped cookies when
    the separator had no trailing space). Values keep any '=' they contain.

    Args:
        cookie_str: Raw cookie string such as "name=value; other=value2".

    Returns:
        dict mapping cookie names to values, both stripped of surrounding
        whitespace; items without '=' are ignored.
    """
    cookies = {}
    for item in cookie_str.split(';'):
        if '=' in item:
            key, value = item.split('=', 1)
            cookies[key.strip()] = value.strip()
    return cookies

def getweiboCookie():
    """Read and return the raw cookie string stored at COOKIE_PATH."""
    with open(COOKIE_PATH, encoding="utf-8") as cookie_file:
        cookie_text = cookie_file.read()
    return cookie_text

def ownRequest(url, method="GET", **kwargs):
    """Fetch *url* with retries, rotating browser headers and human-like pacing.

    Fixes vs. the previous version:
    - removed the dead JSON cookie parse whose result was immediately
      overwritten by the cookies.txt parse,
    - *method* is normalized with .upper() so lowercase "post"/"get" work
      (process_imgUrl passes "post" and always got None before),
    - the randomized ``timeout`` is actually passed to requests (it was
      computed but a hard-coded 10 was used),
    - the SSLError handler is ordered before RequestException (it was
      unreachable, since SSLError subclasses RequestException).

    Args:
        url: Request URL.
        method: HTTP method, "GET" or "POST" (case-insensitive).
        **kwargs: Extra keyword arguments forwarded to requests.get/post.

    Returns:
        requests.Response on success (HTTP 200 and no anti-bot page),
        otherwise None after max_retries failed attempts.
    """
    # Cookies come from the local cookies.txt (browser "k=v; k2=v2" format).
    with open("./cookies.txt", mode="r", encoding="utf-8") as f:
        cookies_str = f.read()
    cookies = parse_cookie_string(cookies_str)

    print("豆瓣开始工作")
    print(f"{method}")

    method = method.upper()  # tolerate lowercase method names from callers
    max_retries = 2  # maximum number of attempts
    retry_count = 0
    # Randomized per-call timeout (seconds) so requests look less uniform.
    timeout = random.randint(15, 25)
    while retry_count < max_retries:
        time.sleep(random.randint(2, 5))
        try:
            print(f"{url}")
            # Random pause to mimic a human operator.
            human_like_delay()

            # Fresh randomized browser headers for every attempt.
            headers = get_random_headers()

            # Randomize the Referer as well.
            referers = [
                'https://www.douban.com/',
                'https://www.douban.com/group',
                'https://www.douban.com/search',
                'https://www.google.com/'
            ]
            headers['Referer'] = random.choice(referers)

            if method == "GET":
                response = requests.get(url, headers=headers, cookies=cookies, proxies=PROXIES, timeout=timeout, verify=False, **kwargs)
            elif method == "POST":
                response = requests.post(url, headers=headers, cookies=cookies, proxies=PROXIES, timeout=timeout, verify=False, **kwargs)
            else:
                print(f"不支持的请求方法: {method}")
                return None

            # Anti-bot page detection: back off for a long time, then retry.
            if "机器人程序" in response.text or "禁止访问" in response.text:
                print("检测到风控，等待更长时间...")
                time.sleep(random.uniform(30, 60))
                retry_count += 1
                continue

            if response.status_code != 200:
                print(f"❌请求失败，状态码:{url}== {response.status_code}，尝试 {retry_count + 1}/{max_retries}")
                time.sleep(random.uniform(30, 60))
                retry_count += 1
                continue

            # Success: linger briefly like a human reader, then return.
            time.sleep(random.uniform(2, 5))
            return response

        except requests.exceptions.ProxyError:
            print(f"代理错误，正在重试 ({retry_count + 1}/{max_retries})")
            retry_count += 1

        except requests.exceptions.ConnectTimeout:
            print(f"连接超时，正在重试 ({retry_count + 1}/{max_retries})")
            retry_count += 1

        except requests.exceptions.ReadTimeout:
            print(f"读取超时，正在重试 ({retry_count + 1}/{max_retries})")
            retry_count += 1

        # SSLError must come before RequestException (its base class),
        # otherwise this handler can never fire.
        except requests.exceptions.SSLError:
            print(f"SSL错误，正在重试 {url} ({retry_count + 1}/{max_retries})")
            retry_count += 1

        except requests.exceptions.RequestException as e:
            print(f"请求异常: {str(e)}，正在重试 ({retry_count + 1}/{max_retries})")
            retry_count += 1

        except ValueError as e:
            print(f"未知错误，正在重试  {url}=={e} ({retry_count + 1}/{max_retries})")
            retry_count += 1

        time.sleep(random.randint(2, 5))

    # All retries exhausted.
    print(f"请求失败，已重试{max_retries}次")
    return None
def ownRequest2(url, method="GET", **kwargs):
    """Single-attempt variant of ownRequest: one request, no retry loop.

    Fixes vs. the previous version:
    - removed the dead JSON cookie parse whose result was immediately
      overwritten by the cookies.txt parse,
    - *method* is normalized with .upper() so lowercase "post"/"get" work,
    - removed the unused ``timeout = random.randint(2, 5)`` local; the
      effective 10-second timeout is now an explicit named constant
      (2-5 s would be too aggressive for real requests),
    - the SSLError handler is ordered before RequestException (it was
      unreachable, since SSLError subclasses RequestException).

    Args:
        url: Request URL.
        method: HTTP method, "GET" or "POST" (case-insensitive).
        **kwargs: Extra keyword arguments forwarded to requests.get/post.

    Returns:
        requests.Response on success (HTTP 200 and no anti-bot page),
        otherwise None.
    """
    # Cookies come from the local cookies.txt (browser "k=v; k2=v2" format).
    with open("./cookies.txt", mode="r", encoding="utf-8") as f:
        cookies_str = f.read()
    cookies = parse_cookie_string(cookies_str)

    print("豆瓣开始工作")
    print(f"{method}")

    method = method.upper()  # tolerate lowercase method names from callers
    timeout = 10  # seconds; shared by both requests calls below

    try:
        print(f"{url}")
        # Random pause to mimic a human operator.
        human_like_delay()

        # Fresh randomized browser headers.
        headers = get_random_headers()

        # Randomize the Referer as well.
        referers = [
            'https://www.douban.com/',
            'https://www.douban.com/group',
            'https://www.douban.com/search',
            'https://www.google.com/'
        ]
        headers['Referer'] = random.choice(referers)

        if method == "GET":
            response = requests.get(url, headers=headers, cookies=cookies, proxies=PROXIES, timeout=timeout, verify=False, **kwargs)
        elif method == "POST":
            response = requests.post(url, headers=headers, cookies=cookies, proxies=PROXIES, timeout=timeout, verify=False, **kwargs)
        else:
            print(f"不支持的请求方法: {method}")
            return None

        # Anti-bot page detection: give up immediately (no retries here).
        if "机器人程序" in response.text or "禁止访问" in response.text:
            print("检测到风控")
            return None

        if response.status_code != 200:
            print(f"❌请求失败，状态码:{url}== {response.status_code}")
            return None

        # Success: linger briefly like a human reader, then return.
        time.sleep(random.uniform(2, 5))
        return response

    except requests.exceptions.ProxyError:
        print(f"代理错误: {url}")

    except requests.exceptions.ConnectTimeout:
        print(f"连接超时: {url}")

    except requests.exceptions.ReadTimeout:
        print(f"读取超时: {url}")

    # SSLError must come before RequestException (its base class),
    # otherwise this handler can never fire.
    except requests.exceptions.SSLError:
        print(f"SSL错误: {url}")

    except requests.exceptions.RequestException as e:
        print(f"请求异常: {url} - {str(e)}")

    except ValueError as e:
        print(f"未知错误: {url}=={e}")

    # Fell through: the request failed.
    print(f"请求失败: {url}")
    return None
# Aliyun image upload endpoint
def process_imgUrl(img_url):
    """Download the image at *img_url* and re-upload it to the Aliyun endpoint.

    Args:
        img_url: URL of the source image to mirror.

    Returns:
        The "data" field of the upload response JSON (the hosted URL),
        or None on any failure.
    """
    url = "http://xdrj.lottefuture.com/zxy/V1/Gen2/ed1XF/ModuleController/SearchModule/uploadImg"

    # Timeout added so a stalled image download cannot hang the caller.
    img_bytes = requests.get(img_url, timeout=30).content
    files = [
        ('Img', (f'{int(time.time())}.png', img_bytes, 'image/webp'))
    ]
    # BUG FIX: use "POST" — the lowercase "post" was rejected by ownRequest's
    # case-sensitive method check, so the upload always returned None.
    res = ownRequest(url, method="POST", files=files)
    try:
        # AttributeError covers res being None; ValueError covers bad JSON
        # (requests' JSONDecodeError subclasses ValueError).
        return res.json().get("data", None)
    except (AttributeError, ValueError):
        print(getCurTime(), f"上传图片{img_url}失败,返回的响应=> {res.text if res else None}")
        return None
