# -*- coding: utf-8 -*-
import time
import requests
from multiprocessing import Pool
import os
import re
import shlex
import logging
import argparse
import sys

# Module-level logger; logging handlers/levels are configured at startup (see set_log / main).
logger = logging.getLogger(__name__)


class Downloader:
    """Concurrent downloader driven by a list of URL lines.

    Each entry in *list_urls* is one line that may contain several
    whitespace-separated columns; the column holding the actual URL is
    detected once from the first line and reused for every line.
    """

    def __init__(self, list_urls, save_path, conn_num=50, dl_file_prefix=None, image_verify=False,
                 auto_add_jpg=False):
        """
        Args:
            list_urls (list): URL lines (a line may carry extra columns).
            save_path (str): directory downloaded files are written to.
            conn_num (int): number of concurrent download threads per batch.
            dl_file_prefix (str): optional filename prefix; the special form
                'url_file_idx:n' uses column *n* of the URL line as the prefix.
            image_verify (bool): verify downloaded images with PIL, delete broken ones.
            auto_add_jpg (bool): append '.jpg' when the URL basename has no extension.
        """
        self.list_urls = list_urls or []
        self.save_path = save_path
        # Guard against non-positive concurrency (conn_num is also used as batch size).
        self.conn_num = max(1, conn_num)
        url_num = len(self.list_urls)
        if url_num == 0:
            logger.warning("Empty URL list provided")
            self.url_column_idx = -1
        else:
            # Never spawn more workers than there are URLs.
            if self.conn_num > url_num:
                self.conn_num = url_num
            # Index of the column holding the real URL (detected from the first line).
            self.url_column_idx = self.__find_url_column(self.list_urls[0])
        self.dl_file_prefix = dl_file_prefix
        self.image_verify = image_verify
        self.auto_add_jpg = auto_add_jpg

    @staticmethod
    def __find_url_column(text):
        """Locate the column that contains an http(s) URL.

        Args:
            text (str): one line of whitespace-separated columns.
        Returns:
            int: 0-based index of the first column matching an http(s) URL,
                 or -1 when no column matches.
        """
        # shlex handles quoting as well as runs of spaces/tabs.
        columns = shlex.split(text.strip())
        for i, col in enumerate(columns):
            if re.match(r'^https?://\S+', col):
                return i
        return -1

    @staticmethod
    def __is_image_valid(file_path):
        """Return True if *file_path* is a readable, non-corrupt image."""
        from PIL import Image
        try:
            with Image.open(file_path) as img:
                img.verify()  # integrity check without decoding all pixel data
            return True
        except (IOError, OSError, Image.DecompressionBombError):
            return False

    @staticmethod
    def __decode_text(value):
        """Best-effort conversion of *value* to text: try utf-8, then gbk.

        Non-bytes values are returned unchanged (already text on Python 3).
        """
        if isinstance(value, bytes):
            try:
                return value.decode('utf-8')
            except UnicodeDecodeError:
                return value.decode('gbk')
        return value

    def __download_one_file(self, str_url, save_path):
        """Download one URL line into *save_path*.

        Returns:
            list: [True, saved_file_path, ''] on success,
                  [False, url, error_message] on failure.
        """
        # A line may carry extra columns; pick the detected URL column,
        # falling back to the whole line when no column was detected.
        list_line = re.sub(r'[ \t]+', ' ', str_url).split()
        if len(list_line) > 1 and self.url_column_idx >= 0:
            url = list_line[self.url_column_idx]
        else:
            url = str_url

        # Derive the local file name from the last URL path segment.
        name = url.split('/')[-1]
        if self.auto_add_jpg:
            root, ext = os.path.splitext(name)
            if not ext:
                name = "{0}.jpg".format(root)
        if self.dl_file_prefix:
            if self.dl_file_prefix.startswith('url_file_idx:'):
                # A malformed index must not kill the worker thread.
                try:
                    idx = int(self.dl_file_prefix[len('url_file_idx:'):])
                    name = "{0}__{1}".format(list_line[idx], name)
                except (ValueError, IndexError) as e:
                    return [False, url, 'Bad dl_file_prefix: %s' % str(e)]
            else:
                name = self.dl_file_prefix + name

        try:
            r = requests.get(url, stream=True, timeout=10)
            if r.status_code == 200:
                imgcode = r.content
                # Reject suspiciously small responses (error pages, placeholders).
                if len(imgcode) < 1024:
                    logger.debug("status=error: %s" % str_url)
                    return [False, url, 'File less than 1KB']

                try:
                    os.makedirs(save_path)
                except OSError as e:
                    if e.errno != 17:  # errno 17 == EEXIST: directory already there
                        raise
                file_path = os.path.join(save_path, name)
                with open(file_path, 'wb') as f:
                    f.write(imgcode)

                if self.image_verify:
                    if not self.__is_image_valid(file_path):
                        os.remove(file_path)
                        return [False, url, "Invalid image removed"]
                return [True, file_path, '']
            return [False, url, 'HTTP Status: {}'.format(r.status_code)]
        except Exception as e:
            logger.debug("status=error: %s" % str_url)
            logger.error("Error downloading %s: %s" % (url, str(e)))
            return [False, url, str(e)]

    def __async_download(self, list_batch_urls):
        """Download one batch of URL lines with a pool of worker threads.

        Returns:
            dict: {"status": "ok", "ok": [saved paths], "fail": [(url, reason), ...]}
        """
        import threading
        try:
            from queue import Queue, Empty  # Python 3
        except ImportError:
            from Queue import Queue, Empty  # Python 2

        q = Queue()
        for url in list_batch_urls:
            q.put(url)

        results = []
        lock = threading.Lock()

        def worker():
            # Drain the queue; exit as soon as it is empty.
            while True:
                try:
                    url = q.get_nowait()
                except Empty:
                    break
                result = self.__download_one_file(url, self.save_path)
                with lock:
                    results.append((url, result))  # keep the original URL with its result
                q.task_done()

        threads = []
        for _ in range(min(self.conn_num, len(list_batch_urls))):
            t = threading.Thread(target=worker)
            t.daemon = True
            t.start()
            threads.append(t)

        for t in threads:
            t.join()

        ok_results = []
        fail_results = []
        for original_url, result in results:
            status, name, msg = result
            if status:
                ok_results.append(name)
            else:
                fail_results.append((original_url, msg))
                try:
                    logger.warning(u"失败记录 - URL: %s - 原因: %s"
                                   % (self.__decode_text(original_url), self.__decode_text(msg)))
                except Exception as e:
                    logger.warning(u"失败记录 - URL解码错误: %s" % str(e))

        return {"status": "ok", "ok": ok_results, "fail": fail_results}

    def __concurrency_download(self, list_batch_urls):
        # Thin indirection point: the batching strategy can change here
        # without touching the main download loop.
        return self.__async_download(list_batch_urls)

    def download(self, detect_ok_cnt=0, detect_fail_cnt=0):
        """De-duplicate the URL list and download it in batches of conn_num.

        Args:
            detect_ok_cnt (int): files already downloaded (resume support).
            detect_fail_cnt (int): files already failed (resume support).
        """
        begin_pos = detect_ok_cnt + detect_fail_cnt
        ok_dl = detect_ok_cnt
        fail_dl = detect_fail_cnt
        all_fail_results = []  # (url, reason) for every failure, for the summary

        # De-duplicate while preserving order.
        seen_urls = set()
        duplicate_urls = []
        unique_urls = []
        for url in self.list_urls:
            if url in seen_urls:
                duplicate_urls.append(url)
            else:
                seen_urls.add(url)
                unique_urls.append(url)

        def flush_batch(batch):
            # Download one batch, record failures, return (ok, fail) counts.
            dict_dl_result = self.__concurrency_download(batch)
            all_fail_results.extend(dict_dl_result['fail'])
            return len(dict_dl_result['ok']), len(dict_dl_result['fail'])

        line_num = 0
        list_batch_urls = []
        for url in unique_urls:
            line_num += 1
            if line_num <= begin_pos:  # resume: skip lines already processed
                continue
            list_batch_urls.append(url)
            if len(list_batch_urls) >= self.conn_num:
                batch_ok, batch_fail = flush_batch(list_batch_urls)
                ok_dl += batch_ok
                fail_dl += batch_fail
                list_batch_urls = []
        # Flush the final partial batch (previously lost when duplicates
        # shifted the line_left-based trigger).
        if list_batch_urls:
            batch_ok, batch_fail = flush_batch(list_batch_urls)
            ok_dl += batch_ok
            fail_dl += batch_fail

        # Report duplicate URLs.
        if duplicate_urls:
            logger.info("重复URL列表:")
            for url in duplicate_urls:
                try:
                    logger.info(u"- %s" % self.__decode_text(url))
                except Exception as e:
                    logger.info(u"- URL解码错误: %s" % str(e))
            logger.info("总重复数: %d" % len(duplicate_urls))

        # Report failures.
        if fail_dl > 0:
            logger.info("失败URL列表:")
            for url, reason in all_fail_results:
                try:
                    logger.info(u"- %s (原因: %s)" % (self.__decode_text(url), self.__decode_text(reason)))
                except Exception as e:
                    logger.info(u"- URL解码错误: %s" % str(e))
            logger.info("总失败数: %d" % fail_dl)

        # Final status: compare against the de-duplicated count, not the raw one.
        if line_num == len(unique_urls):
            if fail_dl == 0:
                logger.info('全部下载成功')
            else:
                logger.info('下载完成')


def set_log(log_level, log_name):
    """Configure the global root logger with a file handler and a console handler.

    Args:
        log_level (str): root logger level name, e.g. 'INFO', 'DEBUG', 'ERROR'.
        log_name (str): path of the log file (opened in append mode).
    """
    global logger
    logger = logging.getLogger()
    # getattr instead of eval: same attribute lookup without executing
    # arbitrary code derived from a command-line argument.
    logger.setLevel(getattr(logging, log_level))

    # Silence noisy debug output from requests / urllib3.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)

    # File handler: every record that passes the root level goes to the log file.
    fh = logging.FileHandler(log_name, mode='a')
    fh.setLevel(logging.DEBUG)

    # Console handler mirrors the same records to the terminal.
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    # Verbose format (file/line/thread) only when debugging.
    if log_level == 'DEBUG':
        formatter = logging.Formatter(
            "[%(asctime)s] - %(filename)s[line:%(lineno)d - %(threadName)s] %(levelname)s:%(message)s")
    else:
        formatter = logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s")
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)


def main():
    # 创建 ArgumentParser 对象
    parser = argparse.ArgumentParser(description="Download images from URL files")

    # 添加 --url_file 参数,类型是 str,必须提供
    parser.add_argument('--url_file', type=str, required=True, help='url文件名(e.g., url.txt)')

    # 添加 --save_path 参数,类型是 str,默认值可以是 ./download1,用于保存下载的文件
    parser.add_argument('--save_path', type=str, default='D:/kw/pywork/file_download/download',
                        help='文件保存的路径,例如/root/download,默认download1')

    # 添加 --conn_num 参数,类型是 int,默认值是 20,表示同时下载的数量
    parser.add_argument('--conn_num', type=int, default=10, help='同时下载的并发数,默认20')

    # 添加 --dl_ok_cnt 参数,类型是 int,默认值是 0,表示从第几行开始下载
    parser.add_argument('--dl_ok_cnt', type=int, default=0,
                        help='之前下载成功的文件数,用于断点续传,须和--dl_fail_cnt参数同时传入,默认0(即从第一行url_file开始)')

    # 添加 --detect_fail_cnt 参数,类型是 int,默认值是 0,表示从第几行开始下载
    parser.add_argument('--dl_fail_cnt', type=int, default=0,
                        help='之前下载失败的文件数,用于断点续传,须和--dl_ok_cnt参数同时传入,默认0(即从第一行url_file开始)')

    # 添加 --log_level 参数,类型是 str,默认值是 INFO,表示日志级别
    parser.add_argument('--log_level', type=str, default='INFO',
                        help='日志级别,可选INFO/DEBUG/ERROR,默认INFO')

    # 添加 --dl_file_prefix 参数,类型是 str,默认值是 None, 对每个下载文件添加指定的前缀
    parser.add_argument('--dl_file_prefix', type=str, default=None,
                        help='对每个下载文件添加指定的前缀,如20230609_,默认为None,当前缀为url_file_idx:n时(n为整数,从0开始,取url_file对应行的第n列作前缀,并用两个下载线分割前缀和原文件名)')

    # 添加 --check_image 参数,类型是 Bool,默认值是 False,表示是否检测下载的图片是否有效
    parser.add_argument('--image_verify', type=bool, default=False,
                        help='是否检测下载的图片是否损坏,如果损坏则删除,默认False')
    # 添加 --auto_add_jpg 参数,类型是 Bool,默认值是 False,表示是否自动添加jpg后缀
    parser.add_argument('--auto_add_jpg', type=bool, default=False,
                        help='当无扩展名时,自动添加jpg后缀,默认False')

    # 添加 --log_file 参数,类型是 str,默认值是 None, 日志文件名
    parser.add_argument('--log_file', type=str, default=None,
                        help='日志文件名,默认本文件名扩展名改为log,即file_download.log')

    # 解析命令行参数
    args = parser.parse_args()

    # 使用参数
    url_file = args.url_file
    save_path = args.save_path
    conn_num = args.conn_num
    dl_ok_cnt = args.dl_ok_cnt
    dl_fail_cnt = args.dl_fail_cnt
    log_level = args.log_level
    dl_file_prefix = args.dl_file_prefix
    image_verify = args.image_verify
    auto_add_jpg = args.auto_add_jpg
    log_file = args.log_file

    # 初始化日志(Python 2.7兼容)
    if log_file is None:
        file_name = os.path.splitext(os.path.basename(__file__))[0]
        log_file = file_name + '.log'
    # 确保日志目录存在
    log_dir = os.path.dirname(log_file)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # 设置日志处理器为二进制模式
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename=log_file,
        filemode='ab'  # 二进制追加模式
    )
    logger = logging.getLogger(__name__)

    # 读取url文件(Python 2.7兼容)
    with open(url_file, 'rb') as f:
        list_url = [line.decode('utf-8').strip() for line in f]

    # 去除每行末尾的换行符
    list_urls = [x.strip() for x in list_url]

    #  输出参数
    logger.info("url_file=%s, save_path=%s, conn_num=%s" % (url_file, save_path, conn_num))

    # 创建下载对象
    downloader = Downloader(list_urls=list_urls,
                            save_path=save_path,
                            conn_num=conn_num,
                            dl_file_prefix=dl_file_prefix,
                            image_verify=image_verify,
                            auto_add_jpg=auto_add_jpg)
    # 记录开始时间
    start_time = time.time()

    # 下载文件(指定100并发数,并指定下载到./new目录下,会自动创建new目录)
    downloader.download(dl_ok_cnt, dl_fail_cnt)

    # 记录结束时间
    end_time = time.time()
    # 计算耗时
    elapsed_time = end_time - start_time
    logger.info("Duration：%.2f second" % elapsed_time)


if __name__ == "__main__":
    main()
