import os
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from urllib.parse import urljoin, urlparse
import urllib3
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading

# Suppress InsecureRequestWarning: all requests in this module are made
# with verify=False (SSL certificate checks intentionally disabled).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Global configuration
VALID_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.gif', '.webp']  # image types worth downloading
MAX_WORKERS = 5  # number of concurrent download threads
LOCK = threading.Lock()  # guards shared counters and unique-filename generation


def download_images(url, save_dir, proxies=None):
    """Download all images referenced by <img> tags on a web page.

    Parameters:
        url (str): Target page URL.
        save_dir (str): Directory to save images into (created if missing).
        proxies (dict, optional): requests-style proxy mapping.
    """
    try:
        os.makedirs(save_dir, exist_ok=True)

        # Fetch the page. verify=False matches the module-wide policy
        # (warnings suppressed above); the timeout prevents the whole run
        # from hanging indefinitely on an unresponsive server.
        response = requests.get(url, proxies=proxies, verify=False, timeout=30)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')
        # Skip <img> tags with a missing/empty src attribute: urljoin(url, None)
        # would silently return the page URL itself, polluting the list.
        img_urls = [
            urljoin(url, src)
            for src in (img.get('src') for img in soup.find_all('img'))
            if src
        ]

        # Shared counters, mutated by worker threads under LOCK.
        counters = {'success': 0, 'skip': 0}

        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            futures = []
            for img_url in img_urls:
                if not is_valid_url(img_url):
                    with LOCK:
                        counters['skip'] += 1
                    continue
                futures.append(executor.submit(
                    download_single_image,
                    img_url, save_dir, proxies, counters
                ))

            # Overall progress: one tick per completed download task.
            with tqdm(total=len(futures), desc="总体进度") as pbar:
                for _ in as_completed(futures):
                    pbar.update(1)

        print(f"\n下载完成！成功: {counters['success']} 张，跳过: {counters['skip']} 张")

    except requests.exceptions.RequestException as e:
        print(f"\n无法访问网页: {str(e)}")


def is_valid_url(url):
    """Return True if *url* looks like a downloadable image URL.

    Rejects missing/empty values, inline ``data:`` URIs, and any URL whose
    path does not end in one of VALID_EXTENSIONS. Query strings are ignored
    because only the parsed path component is inspected.
    """
    # Guard against None/empty input instead of crashing on .startswith.
    if not url or url.startswith('data:'):
        return False
    parsed = urlparse(url)
    ext = os.path.splitext(parsed.path)[1].lower()
    return ext in VALID_EXTENSIONS


def generate_filename(url, save_dir):
    """Generate a unique, non-existing save path for *url* inside *save_dir*.

    The base name is taken from the URL path; if the path yields no name,
    a hash-based one is used. A missing extension defaults to ".jpg".
    On collision with an existing file, a numeric suffix (_1, _2, ...) is
    appended before the extension.

    Returns:
        str: a path under save_dir that does not currently exist.
    """
    parsed = urlparse(url)
    base_name = os.path.basename(parsed.path) or f"image_{hash(url)}"
    ext = os.path.splitext(base_name)[1]

    # Default-extension handling. Keep `ext` in sync with base_name:
    # the original code left ext == "" here, so collision names dropped
    # the extension ("photo.jpg" existing -> "photo_1" instead of
    # "photo_1.jpg").
    if not ext:
        ext = ".jpg"
        base_name += ext

    stem = os.path.splitext(base_name)[0]

    # Probe for a free filename; counter == 0 uses the plain base name.
    counter = 0
    while True:
        filename = f"{stem}_{counter}{ext}" if counter else base_name
        save_path = os.path.join(save_dir, filename)
        if not os.path.exists(save_path):
            return save_path
        counter += 1


def download_single_image(url, save_dir, proxies, counters):
    """Download a single image to save_dir (thread-safe).

    Parameters:
        url (str): Direct image URL.
        save_dir (str): Destination directory (must exist).
        proxies (dict | None): requests-style proxy mapping.
        counters (dict): shared {'success': int, 'skip': int}, mutated under LOCK.

    Returns:
        None on success, or a failure-description string on error.
    """
    save_path = None
    try:
        with LOCK:
            save_path = generate_filename(url, save_dir)
            # Reserve the name immediately by creating an empty file.
            # Without this, a second thread could be handed the same path
            # before this download finishes writing it.
            open(save_path, 'wb').close()

        # Streamed download; timeout prevents a dead server from hanging
        # the worker thread forever.
        response = requests.get(url, stream=True, proxies=proxies,
                                verify=False, timeout=30)
        response.raise_for_status()

        # Per-file progress bar; the context manager guarantees it is
        # closed (and cleared, leave=False) even if writing fails.
        total_size = int(response.headers.get('Content-Length', 0))
        with tqdm(
            total=total_size,
            unit='B',
            unit_scale=True,
            desc=os.path.basename(save_path)[:30],
            bar_format='{l_bar}{bar:30}{r_bar}{bar:-30b}',
            leave=False
        ) as progress_bar:
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        progress_bar.update(len(chunk))

        with LOCK:
            counters['success'] += 1
    except Exception as e:
        # Remove the reserved/partially written file so failures do not
        # leave corrupt artifacts behind.
        if save_path is not None and os.path.exists(save_path):
            try:
                os.remove(save_path)
            except OSError:
                pass
        with LOCK:
            counters['skip'] += 1
        return f"失败: {url} - {str(e)}"
    return None


if __name__ == "__main__":
    # Gather run configuration interactively.
    web_url = input("请输入要抓取的网页URL: ").strip()
    save_folder = input("请输入保存目录（默认: ./images）: ").strip() or "images"

    # Optional proxy setup (requests-style mapping, or None for direct).
    proxy_config = None
    wants_proxy = input("是否使用代理？(y/n): ").lower()
    if wants_proxy == 'y':
        proxy_config = {
            'http': input("HTTP代理地址（例 127.0.0.1:8080）: "),
            'https': input("HTTPS代理地址（例 127.0.0.1:8080）: ")
        }

    # Kick off the crawl and download.
    download_images(web_url, save_folder, proxy_config)
