#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Multi-threaded image downloader.

This module provides an efficient multi-threaded image downloader that can
fetch many images concurrently, supports a variety of image-link formats, and
reports per-batch download statistics and logs.

Usage:
    # As an imported module
    from img_downloader import ImageDownloader

    downloader = ImageDownloader(max_workers=10, timeout=30)
    urls = ['http://example.com/image1.jpg', 'https://example.com/image2.png']
    downloader.download(urls, output_dir='./images')

    # Download with caller-chosen file names
    name_url_pairs = [('custom_name1.jpg', 'http://example.com/image1.jpg'),
                      ('custom_name2.png', 'https://example.com/image2.png')]
    downloader.download_with_names(name_url_pairs, output_dir='./images')

    # Command-line usage
    # python img_downloader.py -i urls.txt -o ./images -w 10
"""
import concurrent.futures
import logging
import os
import random
import re
import threading
import time
from logging import debug, error, exception, info, warning
from typing import Any, Dict, List, Optional

import requests
from filenamehandler import FileNameHandler
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Default output directory for downloaded images.
IMG_DIR = "./images"
# Logging setup: emit to stderr and to a log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler("img_downloader.log")],
)
# Named logger for this module (module code mostly uses the bare
# logging helpers imported above, which go through the root logger).
logger = logging.getLogger("ImageDownloader")
# Shared helper that derives file names / extensions from URLs and responses.
fh = FileNameHandler()
# Long User-Agent literals below are wrapped via implicit string concatenation.

# Pool of common browser User-Agent strings; one is chosen at random per session.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    #
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 "
    "(KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    #
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    #
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
]

# NOTE: do not use ':' as a URL separator (schemes like https:// contain one),
# and ',' is risky too because some links legitimately contain commas.
URL_SEPARATORS = [
    r"\s+",
    ">",
    # ";",
    # ",",
]
COMMON_SEPARATORS = [",", ";", r"\s+"]
URL_SEP_PATTERN = "|".join(URL_SEPARATORS)
COMMON_SEP_PATTERN = "|".join(COMMON_SEPARATORS)

URL_SEP_REGEXP = re.compile(URL_SEP_PATTERN)
COMMON_SEP_REGEXP = re.compile(COMMON_SEP_PATTERN)
info(f"SEP_PATTERN: {URL_SEP_PATTERN}")
# Some sites require a login to serve resources: capture the session cookies
# manually and attach them to every request.
COOKIES = {"sessionid": "abc123xyz", "csrftoken": "csrf_token_here"}



class DownloadStatistics:
    """Download statistics: records and reports per-batch download results.

    Instances are shared across worker threads, so every counter mutation is
    guarded by ``index_lock`` (previously only ``task_index`` was locked,
    leaving ``success``/``failed``/``skipped`` open to lost updates since
    ``int += 1`` is not atomic under concurrent threads).
    """

    def __init__(self):
        # Counters for the current batch.
        self.total = 0
        self.success = 0

        # Monotonic task counter used for the "(i/total)" progress display.
        self.task_index = 0
        # Guards task_index AND the success/failed/skipped counters below.
        self.index_lock = threading.Lock()

        self.failed = 0
        self.skipped = 0
        self.start_time = time.time()
        self.end_time = None
        # Lines of the form "<name> <url>" (or just "<url>") for failed jobs.
        self.failed_urls = []

    def add_success(self):
        """Record one successful download (thread-safe)."""
        with self.index_lock:
            self.success += 1

    def add_failed(self, url, name=""):
        """Record one failed download (thread-safe).

        If the caller supplied a save name for this URL, it is recorded
        alongside the URL so a retry run can reuse it. File names extracted
        from image URLs generally contain no spaces, so a single space is a
        safe field separator.
        """
        line = f"{name} {url}".strip()
        with self.index_lock:
            self.failed += 1
            self.failed_urls.append(line)

    def add_skipped(self):
        """Record one skipped download (thread-safe)."""
        with self.index_lock:
            self.skipped += 1

    def set_total(self, total: int):
        """Set the total number of downloads in this batch."""
        self.total = total

    def finish(self):
        """Mark the batch finished: record the end time and dump failed URLs."""
        self.end_time = time.time()
        self.save_failed_urls()

    def save_failed_urls(self, file_path="failed_urls.txt"):
        """Save the failed URLs to a file so they can be retried later."""
        info(f"Saving failed URLs to {file_path}")
        info(f"Failed URLs: [{self.failed_urls}]")
        with open(file=file_path, mode="w", encoding="utf-8") as f:
            for url in self.failed_urls:
                f.write(url + "\n")

    def get_elapsed_time(self) -> float:
        """Return the elapsed time in seconds (live until finish() is called)."""
        if self.end_time:
            return self.end_time - self.start_time
        return time.time() - self.start_time

    def get_summary(self) -> Dict[str, Any]:
        """Return a summary dict of the batch statistics."""
        return {
            "total": self.total,
            "success": self.success,
            "failed": self.failed,
            "skipped": self.skipped,
            "elapsed_time": self.get_elapsed_time(),
            "failed_urls": self.failed_urls,
        }

    def print_summary(self):
        """Log a human-readable summary of the batch statistics."""
        summary = self.get_summary()
        info("=" * 50)
        info("下载统计摘要:")
        info(f"总计: {summary['total']} 张图片")
        info(f"成功: {summary['success']} 张图片")
        info(f"下载时跳过: {summary['skipped']} 张图片")
        info(f"失败: {summary['failed']} 张图片")
        info(f"耗时: {summary['elapsed_time']:.2f} 秒")

        if summary["failed"] > 0:
            info("失败的URL:")
            for url in summary["failed_urls"]:
                info(f"  - {url}")
        info("=" * 50)


class ImageDownloader:
    """Multi-threaded image downloader built on requests + ThreadPoolExecutor."""

    def __init__(
        self,
        max_workers: int = 10,
        timeout: int = 30,
        retry_times: int = 2,
        user_agent: Optional[str] = None,
        cookies: Optional[Dict[str, str]] = None,
        verify_ssl: bool = True,
        proxies=None,
        proxy_strategy="round_robin",
    ):
        """
        Initialize the image downloader.

        Args:
            max_workers: maximum number of worker threads.
            timeout: per-request timeout in seconds.
            retry_times: number of download attempts (1 means a single try
                with no retry; 0 means no real download is performed).
            user_agent: custom User-Agent; a random browser UA is used if None.
            cookies: cookies to attach to the session.
            verify_ssl: whether to verify SSL certificates (safer but slower).
            proxies: list of proxy settings, each shaped like
                {'http': 'http://proxy.example.com:8080',
                 'https': 'https://proxy.example.com:8080'}.
            proxy_strategy: proxy selection strategy: 'round_robin' or 'random'.
        """
        self.max_workers = max_workers
        self.timeout = timeout
        self.retry_times = retry_times
        self.verify_ssl = verify_ssl
        self.cookies = cookies
        self.stats = DownloadStatistics()
        if retry_times < 1:
            warning("retry_times smaller than 1, no retry will be performed.")
        # One shared session so connections are pooled across downloads.
        self.session = requests.Session()
        # Transport-level retries (in addition to the application-level loop
        # in _download_single_image).
        adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1))
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

        if cookies:
            self.session.cookies.update(cookies)
        # Browser-like headers; the Referer is randomized to look organic.
        self.headers = {
            "User-Agent": user_agent or random.choice(USER_AGENTS),
            "Referer": f"https://{random.choice(['google.com', 'bing.com'])}/",
            "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "text/html,application/xhtml+xml,application/xml;\
                q=0.9,image/avif,image/webp,*/*;q=0.8",
        }
        self.proxies = proxies or []
        self.proxy_strategy = proxy_strategy
        self.proxy_index = 0

    def get_proxy(self):
        """
        Return the next proxy according to the configured strategy.

        Returns None when no proxies are configured or the strategy is unknown.
        """
        if not self.proxies:
            return None
        if self.proxy_strategy == "round_robin":
            # Rotate through the proxy list, advancing the index on each call.
            proxy = self.proxies[self.proxy_index % len(self.proxies)]
            self.proxy_index += 1
            return proxy
        elif self.proxy_strategy == "random":
            # Simple random pick.
            return random.choice(self.proxies)
        return None

    def _download_single_image(
        self,
        url: str,
        output_dir: str,
        filename: Optional[str] = None,
        try_get_ext=True,
        override=False,
        retry_gap=1,
    ) -> bool:
        """
        Download a single image.

        The response is streamed and written in 8 KB chunks
        (``response.iter_content(chunk_size=8192)``) so a large file is never
        loaded into memory at once.

        Args:
            url: image URL.
            output_dir: output directory.
            filename: custom file name; auto-generated from the URL/response
                when None.
            try_get_ext: when filename lacks an extension, try to derive one
                from the URL.
            override: overwrite an existing file (otherwise skip it).
            retry_gap: base delay (seconds) between failed attempts; grows
                exponentially with jitter.

        Returns:
            bool: True on success or skip, False on failure.
        """
        # Bump the shared progress counter under the lock and capture the
        # value before releasing it (reading self.stats.task_index afterwards
        # would race with other workers).
        with self.stats.index_lock:
            self.stats.task_index += 1
            current_index = self.stats.task_index
        info(f"downloading ({current_index}/{self.stats.total}): {filename or ''}:{url} ")
        # If the given filename lacks an extension, try to complete it.
        if try_get_ext:
            filename = self.complete_extension(filename, url)

        # Application-level retry loop (attempt count set by retry_times).
        for attempt in range(self.retry_times):
            try:
                # # To mimic a user, a random pre-request wait could be added here.
                # time.sleep(random.uniform(0.5, 2))
                # stream=True enables chunked download (used with iter_content
                # below) to keep memory usage low.
                response = self.session.get(
                    url=url,
                    timeout=self.timeout,
                    verify=self.verify_ssl,
                    stream=True,
                    # proxies={"https": self.get_proxy()},  # enable proxy use
                )
                # Ensure the streamed connection is released even when we
                # return early (previously leaked on the non-image branch).
                with response:
                    response.raise_for_status()

                    content_type = response.headers.get("Content-Type", "")
                    if not content_type.startswith("image/"):
                        error(f"响应不是图片类型: {url} -> Content-Type={content_type}")
                        self.stats.add_failed(url, name=filename or "")
                        return False

                    debug(f"下载图片: {url} (尝试 {attempt+1}/{self.retry_times})")
                    # Fall back to the default naming strategy when the caller
                    # supplied no file name.
                    if not filename:
                        filename = fh.generate_filename_from_url(url=url, response=response)
                    debug(f"获得文件名: {filename}")

                    # Create the output directory tree if needed (no error if
                    # it already exists).
                    os.makedirs(output_dir, exist_ok=True)

                    file_path = os.path.join(output_dir, filename)
                    if os.path.exists(file_path) and not override:
                        info(f"文件已存在,跳过: {file_path}")
                        self.stats.add_skipped()
                        return True
                    # Write the image as binary, chunk by chunk.
                    with open(file_path, "wb") as f:
                        for chunk in response.iter_content(chunk_size=8192):
                            if chunk:
                                f.write(chunk)

                    file_size = os.path.getsize(file_path)
                    info(f"成功下载: {url} -> {file_path} ({file_size} 字节)")
                    self.stats.add_success()
                return True

            except requests.exceptions.RequestException as e:
                # Request-level failures (connection, timeout, HTTP error).
                warning(
                    f"下载失败 (尝试 {attempt+1}/{self.retry_times}): {url}, 错误: {str(e)}"
                )
                if attempt < self.retry_times - 1:
                    # Exponential backoff with jitter before the next attempt.
                    wait_time = retry_gap * (2**attempt) + random.uniform(0, 1)
                    time.sleep(wait_time)
                else:
                    # Out of attempts: record the failure and bail out.
                    # (The previous version also slept here, which only
                    # delayed the failure report for no benefit.)
                    error(f"下载失败: {url}, 错误: {str(e)}")
                    self.stats.add_failed(url, name=filename or "")
                    return False

        # retry_times < 1: the loop body never ran.
        return False

    def complete_extension(self, filename, url):
        """Complete a missing file extension.

        Args:
            filename: candidate file name; may be None or lack an extension.
            url: resource URL used to derive the extension when missing.

        Returns the filename unchanged when it is falsy or already has an
        extension; otherwise returns the original name with the extension
        derived from the URL appended. (Previously the original name was
        discarded and replaced by a literal "(unknown)" stem.)
        """
        _, ext = os.path.splitext(filename or "")
        if filename and not ext:
            ext = fh.get_file_extension(url=url)
            filename = f"{filename}{ext}"
        return filename

    def download(self, urls: List[Any], output_dir: str = IMG_DIR) -> Dict[str, Any]:
        """
        Download multiple images concurrently.

        Args:
            urls: list of image URLs.
            output_dir: output directory.

        Returns:
            Dict: download statistics summary.
        """
        info(f"开始下载 {len(urls)} 张图片到 {output_dir}")

        # Fresh statistics for this batch.
        self.stats = DownloadStatistics()
        self.stats.set_total(len(urls))

        # Make sure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

        # Fan the downloads out over a thread pool.
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=self.max_workers
        ) as executor:
            future_to_url = {
                executor.submit(self._download_single_image, url, output_dir): url
                for url in urls
            }

            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    future.result()
                except Exception as e:
                    # Unexpected (non-request) error escaped the worker.
                    exception(f"处理下载时发生异常: {url}, 错误: {str(e)}")
                    self.stats.add_failed(url)

        # Finish the batch and report.
        self.stats.finish()
        self.stats.print_summary()

        return self.stats.get_summary()

    def download_with_names(
        self, name_url_pairs: List[Any], output_dir: str = IMG_DIR
    ) -> Dict[str, Any]:
        """
        Download multiple images using caller-supplied file names.

        Args:
            name_url_pairs: list of (filename, URL) tuples.
            output_dir: output directory.

        Returns:
            Dict: download statistics summary.
        """
        info(f"开始下载 {len(name_url_pairs)} 张图片到 {output_dir}")

        # Fresh statistics for this batch.
        self.stats = DownloadStatistics()
        self.stats.set_total(len(name_url_pairs))

        # Make sure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

        # Fan the downloads out over a thread pool, keyed as
        # {future: (filename, url)} so failures can be attributed.
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=self.max_workers
        ) as executor:
            future_to_pair = {
                executor.submit(
                    self._download_single_image,
                    url=url,
                    output_dir=output_dir,
                    filename=filename,
                ): (filename, url)
                for filename, url in name_url_pairs
            }

            for future in concurrent.futures.as_completed(future_to_pair):
                filename, url = future_to_pair[future]
                try:
                    future.result()
                except Exception as e:
                    # Unexpected (non-request) error escaped the worker.
                    failed_dict = {filename: url}
                    exception(f"处理{failed_dict}下载时发生异常, 错误:{str(e)}")
                    self.stats.add_failed(url=url, name=filename)

        # Finish the batch and report.
        self.stats.finish()
        self.stats.print_summary()

        return self.stats.get_summary()




##
