# Download every image referenced by <img> tags on a given web page.

import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import concurrent.futures
import time
import re
from reName  import batch_rename
from collections import defaultdict



def download_image(img_info, save_folder, headers):
    """
    Download a single image (worker function for the thread pool).

    :param img_info: tuple (index, img_url, full_url); index is unused here but
                     kept so callers can pass their enumeration tuple directly
    :param save_folder: destination directory (assumed to exist)
    :param headers: HTTP request headers forwarded to requests.get
    :return: (True, saved_file_path) on success, (False, error_message) on failure
    """
    _idx, img_url, full_url = img_info
    save_path = None
    try:
        # Derive a collision-free filename up front.
        filename = generate_unique_filename(img_url, save_folder)
        save_path = os.path.join(save_folder, filename)

        # Streamed request with a timeout so one slow server can't hang a worker.
        with requests.get(full_url, headers=headers, timeout=15, stream=True) as response:
            response.raise_for_status()

            # Strip media-type parameters ("image/jpeg; charset=...") before
            # extracting the subtype, otherwise the extension check below
            # always falls through to the filename-based fallback.
            content_type = response.headers.get('Content-Type', '').split(';')[0].strip()
            ext = content_type.split('/')[-1] if '/' in content_type else None

            # Fall back to the URL's own extension, then to 'jpg'.
            if not ext or ext.lower() not in ('jpeg', 'jpg', 'png', 'gif', 'webp'):
                file_ext = os.path.splitext(filename)[1].lower()[1:]
                ext = file_ext if file_ext else 'jpg'

            # Append an extension only when the name has no recognized one yet.
            if not filename.lower().endswith(('.' + ext, '.jpg', '.jpeg', '.png', '.gif', '.webp')):
                filename = os.path.splitext(filename)[0] + '.' + ext
                save_path = os.path.join(save_folder, filename)

            # Stream to disk in chunks to keep memory flat for large images.
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

        return True, save_path
    except Exception as e:
        # Don't leave a truncated/corrupt file behind when the transfer fails
        # partway through the write loop.
        if save_path and os.path.exists(save_path):
            try:
                os.remove(save_path)
            except OSError:
                pass
        return False, f"{full_url} - {str(e)}"


def generate_unique_filename(img_url, save_folder):
    """Build a filesystem-safe, collision-free filename for an image URL.

    Strips any query string, synthesizes a millisecond-timestamp name when the
    URL has no final path segment, defaults the extension to .jpg, replaces
    characters Windows forbids, and appends _1, _2, ... until the name does
    not already exist inside save_folder.
    """
    # Drop query parameters, then keep only the last path segment.
    name = os.path.basename(img_url.split('?')[0])

    # URL ended in a directory — fall back to a timestamp-based name.
    if not name:
        name = f"image_{int(time.time() * 1000)}"

    # Guarantee some extension (refined later by the downloader).
    if '.' not in name:
        name += '.jpg'

    # Neutralize characters that are illegal in Windows filenames.
    name = re.sub(r'[\\/:*?"<>|]', '_', name)

    # Bump a numeric suffix until the name is free in the target folder.
    stem, suffix = os.path.splitext(name)
    attempt = 1
    while os.path.exists(os.path.join(save_folder, name)):
        name = f"{stem}_{attempt}{suffix}"
        attempt += 1

    return name


def download_images(url, save_folder='E:\\img'):
    """
    Download all images from a web page using a thread pool.

    Fetches the page, collects de-duplicated image URLs from <img> src /
    data-src attributes, downloads them in parallel, and prints a summary.

    :param url: target page URL
    :param save_folder: directory to save images into (created if missing)
    """
    start_time = time.time()

    # Create the destination directory.
    os.makedirs(save_folder, exist_ok=True)
    print(f"图片将保存到: {os.path.abspath(save_folder)}")

    # Browser-like headers; the Referer helps with hotlink-protected hosts.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Accept': 'image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.9',
        'Referer': url
    }

    try:
        # Fetch the page with a timeout.
        print(f"正在访问目标网页: {url}")
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()

        # Parse the HTML.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Collect every <img> tag.
        img_tags = soup.find_all('img')
        print(f"找到 {len(img_tags)} 个图片标签")

        # Build the de-duplicated download list.
        seen_urls = set()
        image_urls = []

        for img in img_tags:
            # Some pages lazy-load via data-src instead of src.
            img_url = img.get('src') or img.get('data-src')
            if not img_url:
                continue

            # Resolve relative paths and drop query parameters.
            full_url = urljoin(url, img_url).split('?')[0]

            # Skip URLs we have already queued.
            if full_url in seen_urls:
                continue
            seen_urls.add(full_url)

            # Use a sequential index over the *deduped* list so the progress
            # line below ("N/total") is consistent; previously the raw tag
            # index was used, which could exceed len(image_urls).
            image_urls.append((len(image_urls), img_url, full_url))

        if not image_urls:
            print("警告: 没有找到有效的图片URL")
            return

        print(f"准备下载 {len(image_urls)} 张图片 (已过滤重复)...")

        # Download in parallel.
        success_count = 0
        failed_list = []

        print("开始并行下载...")
        print("-" * 50)

        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
            # Map each future back to its image tuple for progress reporting.
            futures = {executor.submit(download_image, img, save_folder, headers): img for img in image_urls}

            # Report results as each download completes.
            for future in concurrent.futures.as_completed(futures):
                img_info = futures[future]
                try:
                    success, result = future.result()
                    if success:
                        print(f"✓ 图片 {img_info[0] + 1}/{len(image_urls)} 已保存: {os.path.basename(result)}")
                        success_count += 1
                    else:
                        print(f"✗ 下载失败: {result}")
                        failed_list.append(result)
                except Exception as e:
                    error_msg = f"任务处理异常: {str(e)}"
                    print(f"✗ {error_msg}")
                    failed_list.append(error_msg)

        # Print the summary.
        elapsed = time.time() - start_time
        print("\n" + "=" * 50)
        print(f"下载完成! 用时: {elapsed:.2f}秒")
        print(f"成功: {success_count}/{len(image_urls)}")
        print(f"失败: {len(failed_list)}")

        if failed_list:
            print("\n失败列表:")
            for fail in failed_list:
                print(f"  - {fail}")

        print(f"图片保存位置: {os.path.abspath(save_folder)}")

    except requests.exceptions.RequestException as e:
        print(f"网络请求失败: {e}")
    except Exception as e:
        print(f"发生未预期错误: {e}")


if __name__ == "__main__":
    # 使用示例
    target_url = "https://telegra.ph/JVID-NO045-%E7%92%83%E5%A5%88%E9%85%B1---%E4%B8%93%E4%B8%9A%E6%B3%A8%E5%B0%84%E5%B0%8F%E5%A4%A9%E4%BD%BF---P2-06-06"  # 替换为你的目标URL
    download_images(target_url)
    batch_rename("E:\\img", "P5")