#!/usr/bin/env python3
import os
import csv
import requests
from tqdm import tqdm
import argparse
import time
import random
import glob
from urllib.parse import urlparse

def download_images(csv_files, output_dir, max_images=None):
    """Download images listed in one or more CSV files into a single directory.

    Each CSV is expected to have a header row and an image URL in the first
    column. Filenames are derived from the last two URL path segments
    (category + basename) so images from different categories do not collide.
    Already-existing files are skipped, making reruns resumable.

    Args:
        csv_files: Iterable of CSV file paths.
        output_dir: Directory to write images into (created if missing).
        max_images: Optional cap; a random sample of this size is downloaded.

    Returns:
        Tuple ``(downloaded, skipped, total_attempted)``.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Collect all non-empty URLs from every CSV file.
    all_urls = []
    for csv_file in csv_files:
        with open(csv_file, mode='r', encoding='utf-8-sig') as f:
            reader = csv.reader(f)
            next(reader, None)  # skip header row; tolerate a completely empty file
            for row in reader:
                if row:  # ignore blank lines
                    url = row[0].strip()
                    if url:  # ignore rows with an empty first column
                        all_urls.append(url)

    # Cap the number of downloads with a random sample.
    if max_images and max_images < len(all_urls):
        all_urls = random.sample(all_urls, max_images)

    downloaded = 0
    skipped = 0
    for url in tqdm(all_urls, desc="下载所有图片"):
        try:
            # Build a unique filename from the last two path segments
            # (category + basename); drop any query-string remnants.
            parsed = urlparse(url)
            path_parts = [p for p in parsed.path.split('/') if p]
            if not path_parts:
                # URL has no usable path component; nothing to name the file by.
                print(f"下载失败 {url}: 无法从URL提取文件名")
                continue
            basename = path_parts[-1].split('?')[0]
            filename = (f"{path_parts[-2]}_{basename}"
                        if len(path_parts) >= 2 else basename)
            file_path = os.path.join(output_dir, filename)

            # Skip files already fetched on a previous run.
            if os.path.exists(file_path):
                skipped += 1
                continue

            # Stream so large images are never fully held in memory; the
            # context manager releases the connection, and the timeout keeps
            # one unresponsive server from stalling the whole batch.
            with requests.get(url, stream=True, timeout=30) as response:
                response.raise_for_status()
                with open(file_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)

            downloaded += 1
            time.sleep(0.1)  # be polite: throttle the request rate

        except (requests.RequestException, OSError) as e:
            # Report and continue: one bad URL must not abort the batch.
            print(f"下载失败 {url}: {e}")

    return downloaded, skipped, len(all_urls)

def main():
    """CLI entry point: parse arguments, download all images, print a summary."""
    parser = argparse.ArgumentParser(description='下载水果数据集（所有图片合并）')
    parser.add_argument('--csv_files', nargs='+', required=True,
                        help='CSV文件路径列表（空格分隔）')
    parser.add_argument('--output_dir', type=str, default='./datasets/fruits_all',
                        help='输出目录')
    parser.add_argument('--max_images', type=int, default=1000,
                        help='最大下载图片数量')
    args = parser.parse_args()

    separator = "=" * 50

    print("\n" + separator)
    print("下载所有图片...")
    stats = download_images(args.csv_files, args.output_dir, args.max_images)
    downloaded, skipped, total = stats

    # Final report: per-run counters plus the actual directory contents.
    print("\n" + separator)
    print("数据集下载完成")
    print(f"尝试下载总数: {total}")
    print(f"成功下载: {downloaded}")
    print(f"跳过已存在: {skipped}")
    print(f"数据集路径: {os.path.abspath(args.output_dir)}")
    print(f"总图片数量: {len(os.listdir(args.output_dir))}")

# Run the downloader only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
