#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import sys
import logging
import argparse
from pathlib import Path
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests
from tqdm import tqdm

# ==================== Configuration ====================
DEFAULT_MAX_WORKERS = 4                # default number of concurrent download threads
DEFAULT_DOWNLOAD_DIR = "./downloads"   # default directory where files are saved
DEFAULT_LIST_FILE = "download.txt"     # default list file: one wget command per line
DEFAULT_LOG_FILE = "download.log"      # default log file path
# =============================================

# Logging setup: every record goes to the log file (UTF-8) and is echoed to stdout.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[
        logging.FileHandler(DEFAULT_LOG_FILE, encoding='utf-8'),
        logging.StreamHandler(sys.stdout)  # optional: also print to the terminal (may interleave with tqdm bars)
    ]
)
logger = logging.getLogger()

def parse_wget_line(line: str):
    """Extract (user, password, url) from a single wget command line.

    Example: wget -c --user=xxx --password=yyy "https://.../file"

    Args:
        line: One line from the download list file.

    Returns:
        Tuple ``(user, password, url)``. ``user``/``password`` are ``None``
        when the corresponding option is absent; ``url`` is ``None`` when no
        plausible URL token can be found (e.g. an empty or option-only line),
        instead of raising IndexError as a bare ``parts[-1]`` would.
    """
    user_match = re.search(r'--user=([^"\s]+)', line)
    pass_match = re.search(r'--password=([^"\s]+)', line)
    url_match = re.search(r'"(https?://[^"]+)"', line)

    if url_match:
        url = url_match.group(1)
    else:
        # Fallback for unquoted URLs: take the last token that is not an
        # option flag (tokens starting with '-' such as --user=... are never
        # the URL).  An empty/option-only line yields None rather than a crash.
        candidates = [p.strip('"') for p in line.split() if not p.startswith('-')]
        url = candidates[-1] if candidates else None

    user = user_match.group(1) if user_match else None
    password = pass_match.group(1) if pass_match else None
    return user, password, url

def get_filename_from_url(url: str) -> str:
    """Return the basename of the URL's path, or a placeholder when the path has none."""
    basename = Path(urlparse(url).path).name
    if basename:
        return basename
    return "unknown_file"

def download_file(args):
    """Download one file described by a packed task tuple.

    Args:
        args: ``(url, user, password, download_dir, skip_existing, position)``
            where ``position`` selects the tqdm row used by this worker.

    Returns:
        ``(filename, ok, status)`` with ``status`` in
        ``{"skipped", "success", "failed"}``.
    """
    url, user, password, download_dir, skip_existing, position = args
    filename = get_filename_from_url(url)
    filepath = Path(download_dir) / filename

    # Only send credentials when a user was actually parsed from the list file;
    # auth=(None, None) would emit a bogus Authorization header.
    auth = (user, password) if user else None

    # Skip files that already exist on disk.
    if skip_existing and filepath.exists():
        logger.info(f"{filename} | ⏭️ 跳过（文件已存在）")
        return filename, True, "skipped"

    # Probe the file size so tqdm can render a bounded bar.  Follow redirects
    # so Content-Length reflects the final resource, not a 3xx response.
    try:
        head_resp = requests.head(url, auth=auth, timeout=10, allow_redirects=True)
        total_size = int(head_resp.headers.get('content-length', 0))
    except Exception:
        total_size = 0  # unknown size: tqdm falls back to an unbounded bar

    # Fixed-position progress bar; long filenames are truncated for alignment.
    desc = f"{filename[:40]:<40}"
    with tqdm(
        total=total_size,
        unit='B',
        unit_scale=True,
        unit_divisor=1024,
        desc=desc,
        position=position,
        leave=True,
        miniters=1,
        dynamic_ncols=True
    ) as pbar:

        try:
            # 'with' ensures the streaming connection is released even on error.
            with requests.get(url, auth=auth, stream=True, timeout=30) as resp:
                resp.raise_for_status()

                with open(filepath, 'wb') as f:
                    for chunk in resp.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)
                            pbar.update(len(chunk))
            logger.info(f"{filename} | ✅ 成功")
            return filename, True, "success"
        except Exception as e:
            if filepath.exists():
                filepath.unlink()  # remove the partial file
            logger.info(f"{filename} | ❌ 失败: {str(e)[:100]}")
            return filename, False, "failed"

def main():
    """Parse CLI options, read the wget-command list, and download concurrently."""
    parser = argparse.ArgumentParser(description="并发下载工具")
    parser.add_argument("-j", "--jobs", type=int, default=DEFAULT_MAX_WORKERS, help="并发数")
    parser.add_argument("-d", "--dir", default=DEFAULT_DOWNLOAD_DIR, help="下载目录")
    parser.add_argument("-f", "--file", default=DEFAULT_LIST_FILE, help="下载列表文件")
    parser.add_argument("--no-skip", action="store_true", help="不跳过已存在文件")
    args = parser.parse_args()

    download_dir = Path(args.dir)
    download_dir.mkdir(parents=True, exist_ok=True)

    if not Path(args.file).is_file():
        print(f"错误：下载列表文件 {args.file} 不存在！")
        sys.exit(1)

    # Read tasks, skipping blank lines and '#' comments.
    tasks = []
    with open(args.file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith('#'):
                try:
                    user, password, url = parse_wget_line(line)
                    if url:
                        tasks.append((url, user, password))
                except Exception as e:
                    print(f"解析失败: {line} - {e}")

    if not tasks:
        print("警告：没有有效下载任务。")
        return

    print(f"共 {len(tasks)} 个任务，使用 {args.jobs} 个并发线程。\n")

    # Assign each task a tqdm row (cycling through 0..jobs-1) for multi-line display.
    task_args = [
        (
            url, user, password,
            str(download_dir),
            not args.no_skip,
            i % args.jobs
        )
        for i, (url, user, password) in enumerate(tasks)
    ]

    # Fan the tasks out to the thread pool and tally only real downloads
    # (skipped pre-existing files are not counted as successes).
    success_count = 0
    with ThreadPoolExecutor(max_workers=args.jobs) as executor:
        futures = [executor.submit(download_file, arg) for arg in task_args]
        for future in as_completed(futures):
            _filename, ok, status = future.result()
            if ok and status == "success":
                success_count += 1

    print(f"\n🎉 全部完成！成功: {success_count}/{len(tasks)}")
    logger.info(f"🎉 全部完成！成功: {success_count}/{len(tasks)}")

if __name__ == "__main__":
    main()
