import argparse
import csv
import json
import os
import re
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from typing import Dict, Iterable, List, Optional, Tuple

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


DEFAULT_WORKERS = 6  # default number of concurrent download threads
DEFAULT_TIMEOUT = 30  # default per-request timeout, in seconds
CHUNK_SIZE = 1024 * 256  # 256KB streaming chunk size used when writing downloads to disk


@dataclass
class DownloadTask:
    """A single unit of download work: fetch ``url`` and store it at ``filename``."""

    name: str      # sanitized display name, used in progress/log lines
    url: str       # source URL to fetch
    filename: str  # target path on disk (already joined with the output directory)


class ThreadSafeCounter:
    """Tally per-outcome counts across worker threads.

    Every read and write of the underlying dict goes through one lock, so
    both `inc` and `counts` may be called from any thread.
    """

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._counts: Dict[str, int] = {"success": 0, "skipped": 0, "failed": 0}

    def inc(self, key: str) -> None:
        """Increment the tally stored under *key* by one."""
        with self._lock:
            self._counts[key] = self._counts[key] + 1

    @property
    def counts(self) -> Dict[str, int]:
        """Return a snapshot copy of the tallies (safe to mutate by the caller)."""
        with self._lock:
            return {k: v for k, v in self._counts.items()}


def create_session(timeout: int, workers: int = DEFAULT_WORKERS) -> requests.Session:
    """Build a `requests.Session` with retries, browser-like headers and a default timeout.

    Args:
        timeout: default per-request timeout (seconds), applied whenever the
            caller does not pass an explicit ``timeout=``.
        workers: expected number of concurrent worker threads; sizes the
            connection pool so threads do not starve for pooled connections.
            Defaults to ``DEFAULT_WORKERS`` for backward compatibility.
    """
    session = requests.Session()
    retry = Retry(
        total=5,
        backoff_factor=0.6,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
        raise_on_status=False,  # let callers inspect resp.status_code themselves
    )
    adapter = HTTPAdapter(
        max_retries=retry,
        pool_connections=workers,
        pool_maxsize=workers * 2,
    )
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Connection": "keep-alive",
    })
    # Wrap session.request so every call gets the default timeout unless overridden.
    session.request = _with_default_timeout(session.request, timeout)  # type: ignore
    return session


def _with_default_timeout(request_func, timeout: int):
    """Wrap *request_func* so calls get *timeout* when none is supplied.

    An explicit ``timeout=None`` from the caller is also replaced with the
    default, matching requests' "no timeout given" semantics.
    """

    def wrapped(method, url, **kwargs):
        if kwargs.get("timeout") is None:  # absent OR explicitly None
            kwargs = {**kwargs, "timeout": timeout}
        return request_func(method, url, **kwargs)

    return wrapped


def ensure_dir(path: str) -> None:
    """Create *path* (including parents) if missing; no-op when it already exists."""
    os.makedirs(path, exist_ok=True)


# Anything NOT in this class is replaced: keep word chars, dash, dot, CJK ideographs, whitespace.
INVALID_FILENAME_CHARS = r"[^\w\-\.\u4e00-\u9fff\s]"
MULTISPACE_RE = re.compile(r"\s+")


def sanitize_filename(name: str) -> str:
    """Make *name* safe to use as a filename.

    Replaces disallowed characters with "_", collapses whitespace runs,
    trims surrounding space/dot/dash, caps the length at 180 characters,
    and falls back to "untitled" when nothing usable remains.
    """
    name = name.strip()
    name = re.sub(INVALID_FILENAME_CHARS, "_", name)
    name = MULTISPACE_RE.sub(" ", name)
    name = name.strip(" .-")
    # Cap length to stay under OS filename limits; re-trim afterwards because
    # the cut can expose a trailing space/dot/dash (trailing space and dot are
    # invalid filename endings on Windows).
    name = name[:180].rstrip(" .-")
    return name or "untitled"


def coerce_mp3_filename(name: str) -> str:
    """Return *name* guaranteed to end in ``.mp3`` (extension check is case-insensitive)."""
    has_mp3_ext = name.lower().endswith(".mp3")
    return name if has_mp3_ext else f"{name}.mp3"


def read_mapping(input_path: str) -> List[Tuple[str, str]]:
    """Load (name, url) pairs from a manifest, dispatching on the file extension.

    Raises:
        ValueError: when the extension is neither ``.csv`` nor ``.json``.
    """
    _, ext = os.path.splitext(input_path)
    ext = ext.lower()
    if ext == ".csv":
        return _read_csv_mapping(input_path)
    if ext == ".json":
        return _read_json_mapping(input_path)
    raise ValueError(f"不支持的清单格式: {ext}，请使用 .csv 或 .json")


def _read_csv_mapping(path: str) -> List[Tuple[str, str]]:
    """Read (name, url) pairs from a CSV manifest.

    Accepts any of ``name``/``title``/``filename`` for the name column and
    ``url``/``link`` for the URL column (case-insensitive, whitespace ignored).
    Rows without a URL are dropped; a missing name falls back to the URL's
    basename (query string stripped) or "untitled".
    """
    name_aliases = {"name", "title", "filename"}
    url_aliases = {"url", "link"}
    results: List[Tuple[str, str]] = []
    with open(path, "r", encoding="utf-8-sig", newline="") as handle:
        reader = csv.DictReader(handle)
        fieldnames = reader.fieldnames or []
        headers = {h.strip().lower() for h in fieldnames}
        # First matching column wins, preserving original header spelling.
        name_key = None
        url_key = None
        for column in fieldnames:
            normalized = column.strip().lower()
            if name_key is None and normalized in name_aliases:
                name_key = column
            if url_key is None and normalized in url_aliases:
                url_key = column
        if not name_key or not url_key:
            raise ValueError(f"CSV 需要包含 'name' 与 'url' 列，当前列为: {headers}")
        for row in reader:
            url_value = str(row.get(url_key, "")).strip()
            if not url_value:
                continue
            name_value = str(row.get(name_key, "")).strip()
            if not name_value:
                name_value = os.path.basename(url_value.split("?")[0]) or "untitled"
            results.append((name_value, url_value))
    return results


def _read_json_mapping(path: str) -> List[Tuple[str, str]]:
    """Read (name, url) pairs from a JSON manifest.

    Supports two shapes: an object ``{name: url}`` or an array of objects
    with ``name``/``title``/``filename`` and ``url``/``link`` keys. Entries
    without a URL are dropped; a missing name falls back to the URL basename.
    """
    with open(path, "r", encoding="utf-8") as handle:
        payload = json.load(handle)

    if isinstance(payload, dict):
        # Object form: keys are names, values are URLs; skip falsy URLs.
        return [(str(k), str(v)) for k, v in payload.items() if v]

    if isinstance(payload, list):
        out: List[Tuple[str, str]] = []
        for entry in payload:
            if not isinstance(entry, dict):
                continue
            url = str(entry.get("url") or entry.get("link") or "")
            if not url:
                continue
            name = str(entry.get("name") or entry.get("title") or entry.get("filename") or "")
            if not name:
                name = os.path.basename(url.split("?")[0]) or "untitled"
            out.append((name, url))
        return out

    raise ValueError("JSON 格式应为对象{name:url}或数组[{name,url}]")


def build_tasks(pairs: Iterable[Tuple[str, str]], out_dir: str) -> List[DownloadTask]:
    """Convert raw (name, url) pairs into DownloadTasks targeting *out_dir*.

    Names are sanitized and coerced to ``.mp3`` before being joined with the
    output directory.
    """
    tasks: List[DownloadTask] = []
    for raw_name, url in pairs:
        cleaned = sanitize_filename(raw_name)
        target = os.path.join(out_dir, coerce_mp3_filename(cleaned))
        tasks.append(DownloadTask(name=cleaned, url=url, filename=target))
    return tasks


def file_exists_and_complete(path: str) -> bool:
    """Return True when *path* exists and is larger than 1KB.

    The 1KB threshold is a cheap heuristic for "not an empty/aborted
    download" — there is no deeper integrity check.
    """
    try:
        return os.path.getsize(path) > 1024
    except OSError:
        # Missing file (or broken symlink) — treat as incomplete.
        return False


def download_one(session: requests.Session, task: DownloadTask, overwrite: bool = False) -> Tuple[str, str, str, Optional[str]]:
    """Download one task, streaming to a ``.part`` file then renaming atomically.

    Args:
        session: shared HTTP session (retries/timeout already configured).
        task: what to fetch and where to store it.
        overwrite: when True, re-download even if the target already exists.

    Returns:
        A ``(name, url, status, message)`` tuple where status is one of
        "success", "skipped" or "failed"; message is None on success.
        Never raises — all exceptions are converted into a "failed" result.
    """
    tmp_path = f"{task.filename}.part"

    def _discard_partial() -> None:
        # Best-effort removal of a stale/incomplete .part file.
        if os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except OSError:
                pass

    try:
        # Skip only when not overwriting and the target looks complete
        # (file_exists_and_complete also covers the existence check).
        if not overwrite and file_exists_and_complete(task.filename):
            return (task.name, task.url, "skipped", "已存在，跳过")
        _discard_partial()
        with session.get(task.url, stream=True) as resp:
            if resp.status_code >= 400:
                return (task.name, task.url, "failed", f"HTTP {resp.status_code}")
            parent = os.path.dirname(task.filename)
            if parent:  # guard: os.makedirs("") raises FileNotFoundError
                ensure_dir(parent)
            with open(tmp_path, "wb") as f:
                for chunk in resp.iter_content(chunk_size=CHUNK_SIZE):
                    if chunk:  # filter out keep-alive chunks
                        f.write(chunk)
        # Atomic rename: the final filename only ever holds a full download.
        os.replace(tmp_path, task.filename)
        return (task.name, task.url, "success", None)
    except Exception as e:
        _discard_partial()
        return (task.name, task.url, "failed", str(e))


def run_downloads(tasks: List[DownloadTask], workers: int, timeout: int, overwrite: bool) -> Dict[str, int]:
    """Download all *tasks* concurrently over one shared session.

    Args:
        tasks: prepared download tasks.
        workers: number of concurrent threads.
        timeout: default per-request timeout (seconds).
        overwrite: re-download targets that already exist.

    Returns:
        Counts keyed by "success" / "skipped" / "failed".
    """
    counter = ThreadSafeCounter()
    session = create_session(timeout)
    try:
        with ThreadPoolExecutor(max_workers=workers) as executor:
            future_to_task = {executor.submit(download_one, session, task, overwrite): task for task in tasks}
            for future in as_completed(future_to_task):
                # download_one never raises, so result() is safe here.
                name, url, status, message = future.result()
                if status == "success":
                    counter.inc("success")
                    print(f"[OK] {name}")
                elif status == "skipped":
                    counter.inc("skipped")
                    print(f"[SKIP] {name} - {message}")
                else:
                    counter.inc("failed")
                    print(f"[FAIL] {name} <- {url} | {message}")
    finally:
        # Release the pooled HTTP connections once all workers are done
        # (the original leaked the session's connection pool).
        session.close()
    return counter.counts


def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Define and parse the CLI arguments for the batch downloader."""
    parser = argparse.ArgumentParser(description="批量下载 MP3：提供名称与URL清单，支持并发与重试")
    # Default output directory sits next to this script.
    default_outdir = os.path.join(os.path.dirname(__file__), "downloads")
    parser.add_argument("--input", required=True, help="清单文件路径 (.csv 或 .json)")
    parser.add_argument("--outdir", default=default_outdir, help="下载输出目录")
    parser.add_argument("--workers", type=int, default=DEFAULT_WORKERS, help=f"并发下载数，默认 {DEFAULT_WORKERS}")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help=f"单请求超时秒数，默认 {DEFAULT_TIMEOUT}")
    parser.add_argument("--overwrite", action="store_true", help="若目标文件存在则覆盖")
    return parser.parse_args(argv)


def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point.

    Returns:
        0 when everything succeeded (or the manifest was empty),
        1 when at least one download failed,
        2 when the manifest could not be read.
    """
    args = parse_args(argv)
    ensure_dir(args.outdir)
    try:
        pairs = read_mapping(args.input)
    except Exception as exc:
        print(f"读取清单失败: {exc}")
        return 2
    if not pairs:
        print("清单为空，无需下载")
        return 0
    tasks = build_tasks(pairs, args.outdir)
    print(f"共 {len(tasks)} 首，开始下载 -> {args.outdir}，并发: {args.workers}")
    summary = run_downloads(tasks, args.workers, args.timeout, args.overwrite)
    print("\n下载完成：")
    print(f"  成功: {summary['success']}")
    print(f"  跳过: {summary['skipped']}")
    print(f"  失败: {summary['failed']}")
    return 1 if summary["failed"] else 0


if __name__ == "__main__":
    # Propagate main()'s exit code (0 ok, 1 partial failure, 2 manifest error) to the shell.
    sys.exit(main())