import os
import re
import time
import traceback
from urllib.parse import unquote, urlparse

import requests
from PyQt5.QtCore import QThreadPool, QRunnable
from bs4 import BeautifulSoup


def get_num(mh_name, page):
    """Search wnacg.com and return a mapping of album title -> download path.

    Parameters
    ----------
    mh_name : str
        Search keyword (sent as the ``q`` query parameter).
    page : int
        Result page index (sent as the ``p`` query parameter).

    Returns
    -------
    dict[str, str]
        Album title -> album href with ``photos`` replaced by ``download``
        (i.e. the download-page path used by :func:`get_pic_dir`).

    Raises
    ------
    requests.HTTPError
        If the search request returns an HTTP error status.
    """
    # This approach relies on a system-wide proxy: requests honors the
    # HTTP(S)_PROXY environment variables. NOTE: this mutates process-wide
    # state for all subsequent requests, not just this call.
    proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
    os.environ['HTTP_PROXY'] = proxy_url
    os.environ['HTTPS_PROXY'] = proxy_url
    cookies = {
        '_ym_uid': '1743919454717203657',
        '_ym_d': '1743919454',
        '_ym_isad': '2',
        '_ym_visorc': 'b',
    }
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
        'priority': 'u=0, i',
        'referer': 'https://www.wnacg.com/albums-index-page-182-tag-3D.html?',
        'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?1',
        'sec-ch-ua-platform': '"Android"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Mobile Safari/537.36',
    }
    params = {
        'q': str(mh_name),
        'm': '',
        'f': '_all',
        'syn': 'yes',
        's': 'create_time_DESC',
        'p': str(page),
    }
    # timeout prevents an indefinite hang; raise_for_status stops us from
    # silently parsing an HTTP error page as if it were search results
    response = requests.get('https://www.wnacg.com/search/', params=params,
                            cookies=cookies, headers=headers, timeout=30)
    print(response.status_code)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    a_items = soup.find_all(class_="li gallary_item")
    name_list = {}
    for item in a_items:
        ite = item.find("a")
        if ite is None:
            continue  # defensive: skip malformed gallery items
        title = ite.get('title')
        href = ite.get('href')
        if title is None or href is None:
            continue  # anchor without title/href would crash the old code
        name_list[title] = href.replace("photos", "download")
        print(title)
    return name_list


def get_pic_dir(num):
    """Fetch an album's download page and extract the direct download href.

    Parameters
    ----------
    num : str
        Site-relative download-page path (e.g. ``/download-index-aid-....html``),
        as produced by :func:`get_num`.

    Returns
    -------
    str | None
        The href of the ``down_btn ads`` anchor, or ``None`` if not found.
    """
    # BUGFIX: the original code built a ``proxy`` dict followed by a trailing
    # comma, which silently produced a one-element *tuple* — and it was never
    # passed to requests.get anyway (it also embedded proxy credentials in
    # source). That dead block has been removed; proxying is handled via the
    # environment variables below.

    # This approach relies on a system-wide proxy listening locally.
    proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
    os.environ['HTTP_PROXY'] = proxy_url
    os.environ['HTTPS_PROXY'] = proxy_url

    cookies = {
        '_ym_uid': '1743919454717203657',
        '_ym_d': '1743919454',
        '_ym_isad': '2',
        '_ym_visorc': 'b',
        '_ga': 'GA1.2.1986738831.1743919538',
        '_gid': 'GA1.2.3134594.1743919538',
    }

    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
        'priority': 'u=0, i',
        'referer': 'https://www.wnacg.com/search/?q=%E6%AF%8D%E5%AD%903d&f=_all&s=create_time_DESC&syn=yes',
        'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?1',
        'sec-ch-ua-platform': '"Android"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Mobile Safari/537.36',
    }

    # timeout guards against a hung connection
    response = requests.get(f'https://www.wnacg.com{num}', cookies=cookies,
                            headers=headers, timeout=30)
    print(response.status_code)

    # non-greedy match so we stop at the first closing quote of the href
    pattern = r'<a class="down_btn ads" href="(.*?)"'
    match = re.search(pattern, response.text)
    href_value = None
    if match:
        href_value = match.group(1)
        print(href_value)
    else:
        print("未找到 href 属性")
    return href_value


def get_zip(url):
    """Stream-download an archive *url* to the working directory as a .zip.

    Parameters
    ----------
    url : str
        Download URL, possibly protocol-relative (``//host/...``) and usually
        carrying the display filename in a ``?n=`` query parameter.

    Side effects
    ------------
    Writes ``<decoded name>.zip`` in the current working directory and prints
    progress/errors; all exceptions are caught and reported, not re-raised
    (deliberate best-effort behavior preserved from the original).
    """
    # This approach relies on a system-wide proxy listening locally.
    proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
    os.environ['HTTP_PROXY'] = proxy_url
    os.environ['HTTPS_PROXY'] = proxy_url

    # Protocol-relative URLs ('//host/...') get an https: prefix.
    full_url = url if url.startswith('http') else f'https:{url}'

    # BUGFIX: the original used url.split('?n=')[-1], which falls back to the
    # ENTIRE url (including '/' separators) when '?n=' is absent. Fall back to
    # the URL path's basename instead, and strip any path separators from the
    # decoded name so the file cannot land outside the working directory.
    if '?n=' in url:
        file_param = url.split('?n=')[-1]
    else:
        file_param = urlparse(full_url).path.rsplit('/', 1)[-1] or 'download'
    decoded = unquote(file_param).replace(' ', '_')
    decoded = decoded.replace('/', '_').replace('\\', '_')
    safe_filename = decoded + '.zip'

    try:
        # stream=True + chunked writes keep memory flat for large archives;
        # timeout prevents an indefinite hang on a dead connection
        with requests.get(full_url, stream=True, allow_redirects=True,
                          timeout=60) as response:
            response.raise_for_status()  # surface HTTP error status codes
            with open(safe_filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
        print(f'文件已保存为：{safe_filename}')
    except Exception as e:
        print(f'下载失败：{str(e)}')


def get_pic(url, save_path):
    """Download an image *url* and save it as ``<save_path>.jpg``.

    Parameters
    ----------
    url : str
        Image URL; a missing scheme (e.g. ``//host/a.jpg`` or ``host/a.jpg``)
        is auto-completed to https.
    save_path : str
        Destination path WITHOUT extension; ``.jpg`` is appended.

    Side effects
    ------------
    Writes the image file and prints the outcome; request errors are caught
    and reported, not re-raised (best-effort behavior preserved).
    """
    # This approach relies on a system-wide proxy listening locally.
    proxy_url = "http://127.0.0.1:7890"  # local proxy ip:port
    os.environ['HTTP_PROXY'] = proxy_url
    os.environ['HTTPS_PROXY'] = proxy_url
    try:
        # Auto-complete a missing protocol prefix.
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url.lstrip('/')
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # surface HTTP error status codes
        target = save_path + '.jpg'
        with open(target, 'wb') as f:
            f.write(response.content)
        # BUGFIX: report the actual file written (with .jpg), not the bare
        # save_path the original printed
        print(f"图片下载成功，保存路径：{target}")
    except requests.exceptions.RequestException as e:
        print(f"下载失败：{e}")


if __name__ == '__main__':
    # Fetch page 0 of search results for the "3d" keyword.
    results = get_num("3d", 0)
    # Full pipeline (currently disabled): resolve each album's download
    # link, then fetch the archive.
    # for title, path in results.items():
    #     print(title)
    #     link = get_pic_dir(path)
    #     get_zip(link)
