import time
import traceback

import requests, os, datetime
from urllib.parse import *
from fake_useragent import UserAgent
from log_tools import log_tools
logger = log_tools.get_logger()


def download(
        fileUrl: str, fileName: str, targetPath: str, retry: bool = None, headers: dict = None, proxies: dict = None,
        repeatStrategy: str = "1", timeout: float = 60.0,
) -> bool:
    """
    Download a single file and save it to disk (streamed, with a console progress bar).

    :param fileUrl: URL of the file to download
    :param fileName: name to save the file as
    :param targetPath: directory to save into (created if missing)
    :param retry: opaque retry flag; only echoed into the error record on failure
    :param headers: custom request headers; when None a UA/Referer/Host set is generated
    :param proxies: custom proxies passed straight to requests
    :param repeatStrategy: what to do when the target file already exists:
                           "0" - rename (append timestamp), "1" - skip, "2" - overwrite
    :param timeout: connect/read timeout in seconds for the HTTP request
    :return: True on success (or skip), False on any download error
    """

    result = True
    # File info
    logger.info('[文件链接]:    ' + fileUrl)
    if not os.path.exists(targetPath):
        os.makedirs(targetPath)
    logger.info('[文件名 ]:     ' + fileName)
    # os.path.join inserts the separator; plain concatenation produced paths
    # like "./download0120.jpg" when targetPath lacked a trailing slash.
    filePath = os.path.join(targetPath, fileName)
    if os.path.exists(filePath):
        if repeatStrategy == "0":
            # Rename: insert a timestamp before the extension (or append it
            # when the name has no extension at all).
            tmp = filePath.rsplit('.', maxsplit=1)
            stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            if len(tmp) == 2:
                filePath = tmp[0] + "-" + stamp + "." + tmp[1]
            else:
                filePath = filePath + "-" + stamp
        elif repeatStrategy == "1":
            logger.info('文件已存在，跳过该文件。')
            return True
        elif repeatStrategy == "2":
            # Overwrite: original code called os.remove() with no argument.
            os.remove(filePath)
        else:
            # Lazy %-formatting; the original passed the value as an unused arg.
            logger.error("文件重复策略错误：%s", repeatStrategy)

    logger.info('[文件路径]:   ' + filePath)

    # Local UA database so fake_useragent does not hit the network for its list.
    agent = UserAgent(cache_path=r"fake_useragent.json", use_external_data=True)
    logger.info(f'agent:{agent.cache_path}')
    urlParse = urlparse(fileUrl)
    if headers is None:
        # Many image hosts require a same-site Referer/Host to serve the file.
        headers = {
            'User-Agent': agent.random,
            'Referer': f'{urlParse.scheme}://{urlParse.netloc}',
            'Host': urlParse.netloc,
        }

    # Download
    start = time.time()  # start time (outside try so the elapsed log always works)
    try:
        # Context manager releases the streamed connection; timeout prevents
        # an indefinite hang (requests never times out by default).
        with requests.get(fileUrl, stream=True, headers=headers, proxies=proxies, timeout=timeout) as response:
            response.raise_for_status()
            chunk_size = 1024  # bytes per chunk
            # Chunked responses carry no Content-Length; fall back to 0 and
            # skip size/progress output rather than raising KeyError.
            content_size = int(response.headers.get("content-length", 0))
            if content_size:
                logger.info('[文件大小]: {:.3f} MB\n'.format(content_size / chunk_size / 1024))
            size = 0
            with open(filePath, 'wb') as f:
                for data in response.iter_content(chunk_size=chunk_size):
                    f.write(data)
                    size += len(data)  # bytes downloaded so far
                    if content_size:
                        # Console progress bar (only meaningful with a known total).
                        print('\r[下载进度]: {}{:.2f}%'.format('>' * int(size * 50 / content_size), float(size / content_size * 100)), end='')
                print("\n")
    except (TimeoutError, requests.exceptions.Timeout):
        # requests raises its own Timeout subclass, not the builtin TimeoutError.
        result = False
        logger.error(f'[下载超时]：{fileUrl}')
    except Exception as e:
        result = False
        logger.error(f'[下载失败] -> 原因：{e}')
        record = {"url": fileUrl, "path": filePath, "fileName": fileName, "retry": retry}
        logger.error(f'[错误记录]:  {record}')
        logger.debug("\n" + traceback.format_exc())
    end = time.time()  # end time

    logger.info("[下载时间]: {:.2f}s".format(end - start))
    logger.info("".center(100, "*"))
    return result


if __name__ == '__main__':
    # Demo run: fetch one sample image into ./download
    demo_url = 'https://img.997pp.com/Tu/onlyfans/Images/hongkongdoll-0152.jpg'
    demo_name = '0120.jpg'
    download(demo_url, demo_name, targetPath='./download')

