#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Standard library
import argparse
import os
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

# Third-party
import requests
from tqdm import tqdm

def _format_from_content_type(url, headers):
    """Infer an image file extension from the URL's Content-Type header.

    Sends a HEAD request with the same browser-like headers used for the
    actual download (some hosts reject the default requests User-Agent).
    Returns 'jpg' when the probe fails or the type is unrecognized.
    """
    type_to_ext = {
        'image/jpeg': 'jpg',
        'image/png': 'png',
        'image/webp': 'webp',
        'image/gif': 'gif',
        'image/bmp': 'bmp',
        'image/avif': 'avif',
    }
    try:
        head_response = requests.head(url, timeout=5, headers=headers)
        content_type = head_response.headers.get('Content-Type', '')
        for mime, ext in type_to_ext.items():
            if mime in content_type:
                return ext
    except Exception:
        pass  # best-effort probe: fall through to the default
    return 'jpg'

def download_image(url, save_dir, idx=None):
    """Download a single image and save it into `save_dir`.

    Args:
        url (str): image URL.
        save_dir (str): destination directory (assumed to exist).
        idx (int, optional): index used to build a fallback file name when
            the URL does not yield a usable one.

    Returns:
        tuple: (True, file_path) on success or when the file already exists,
               (False, error_message) on any failure.
    """
    # Browser-like headers: sent on both the HEAD probe and the GET, since
    # this host appears to require a Referer / real User-Agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Referer': 'http://3c.xy05.my/',
        'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8'
    }
    try:
        # Candidate file name from the URL path (query string stripped).
        file_name = os.path.basename(url.split('?')[0])

        # Try to detect the image format from the URL extension.
        image_format = None
        format_match = re.search(r'\.(jpg|jpeg|png|gif|webp|avif|bmp)($|\?)', url.lower())
        if format_match:
            image_format = format_match.group(1)

        # No usable name or recognizable format in the URL: probe the
        # Content-Type header and synthesize a unique name.
        if not file_name or '.' not in file_name or not image_format:
            image_format = _format_from_content_type(url, headers)
            file_name = f"image_{idx if idx is not None else int(time.time())}_{random.randint(1000, 9999)}.{image_format}"

        file_path = os.path.join(save_dir, file_name)

        # Skip files downloaded on a previous run.
        if os.path.exists(file_path):
            return True, file_path

        # Stream to a temporary ".part" file first: a failed download must
        # never leave a partial file under the final name, or the
        # exists-check above would skip the corrupt file on every rerun.
        tmp_path = file_path + '.part'
        try:
            with requests.get(url, stream=True, timeout=30, headers=headers) as response:
                response.raise_for_status()
                with open(tmp_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
            # Atomic rename: the final name only ever holds a complete file.
            os.replace(tmp_path, file_path)
        except Exception:
            # Clean up the partial file so a rerun retries this URL.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
            raise

        # Small random pause to avoid hammering the server.
        time.sleep(random.uniform(0.1, 0.5))

        return True, file_path

    except Exception as e:
        return False, f"下载失败 {url}: {str(e)}"

def download_images_from_file(file_path, save_dir, max_threads=10):
    """Read image URLs (one per line) from a file and download them in parallel.

    Args:
        file_path (str): path of the URL list file.
        save_dir (str): directory to save images into (created if missing).
        max_threads (int): maximum number of worker threads.
    """
    # Ensure the destination directory exists.
    os.makedirs(save_dir, exist_ok=True)

    # Read the URL list; explicit utf-8 because the platform default
    # encoding is not reliable (e.g. cp936 on Chinese Windows).
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            urls = [line.strip() for line in f if line.strip()]
    except Exception as e:
        print(f"读取文件失败: {str(e)}")
        return

    if not urls:
        print("文件中没有找到URL")
        return

    print(f"找到 {len(urls)} 个图片URL地址")

    # Success / failure accounting.
    success_count = 0
    failed_count = 0
    failed_urls = []

    # Download in parallel with a thread pool.
    with ThreadPoolExecutor(max_workers=max_threads) as executor:
        # Submit every download up front; map futures back to their URL.
        future_to_url = {
            executor.submit(download_image, url, save_dir, idx): url
            for idx, url in enumerate(urls)
        }

        # Consume results with as_completed so the progress bar advances as
        # each download finishes; iterating the dict directly would block in
        # submission order and stall the bar behind slow early tasks.
        with tqdm(total=len(urls), desc="下载进度") as pbar:
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    success, result = future.result()
                    if success:
                        success_count += 1
                    else:
                        failed_count += 1
                        failed_urls.append((url, result))
                except Exception as e:
                    # download_image normally returns (False, msg); this
                    # catches anything it let escape.
                    failed_count += 1
                    failed_urls.append((url, str(e)))

                pbar.update(1)

    # Summary.
    print(f"\n下载完成！成功: {success_count}, 失败: {failed_count}")

    if failed_urls:
        print("\n失败的下载:")
        for url, error in failed_urls[:10]:  # show at most 10 errors
            print(f"- {url}: {error}")

        if len(failed_urls) > 10:
            print(f"... 以及其他 {len(failed_urls) - 10} 个错误")

        # Persist the failed URLs so they can be re-fed to this tool.
        failed_file = os.path.join(save_dir, "failed_downloads.txt")
        with open(failed_file, 'w', encoding='utf-8') as f:
            for url, _ in failed_urls:
                f.write(f"{url}\n")
        print(f"\n失败的URL已保存到 {failed_file}")

def main():
    """Parse command-line arguments and run the batch download."""
    parser = argparse.ArgumentParser(description="批量下载图片工具")
    parser.add_argument('file', help='包含图片URL的文件路径，每行一个URL')
    parser.add_argument('-d', '--dir', default='downloads', help='保存图片的目录 (默认: "downloads")')
    parser.add_argument('-t', '--threads', type=int, default=10, help='最大线程数 (默认: 10)')
    # NOTE(review): --retry is parsed but never passed anywhere — confirm
    # whether retry support was planned or this option should be removed.
    parser.add_argument('-r', '--retry', type=int, default=3, help='下载失败重试次数 (默认: 3)')
    args = parser.parse_args()

    # Time the whole run.
    started = time.time()
    download_images_from_file(args.file, args.dir, args.threads)
    elapsed = time.time() - started
    print(f"总耗时: {elapsed:.2f} 秒")

if __name__ == "__main__":
    main()