import os
from common import DownTool
import common
from common import LoggerTool
from common import FileTool
import requests
import time
from common import ProxyIp
import io
from concurrent import futures


# NOTE: to route requests through a random proxy, pass
# proxies=ProxyIp.ProxyIp().get_random_proxy_ip() to requests.get below.
def down_img(img_url, title, down_dir):
    """Download one image into *down_dir*, keeping the URL's basename.

    Skips the download when the target file already exists. On a request
    failure the URL is appended to an ``un_done-<date>.text`` file in
    *down_dir*, the thread sleeps 5 seconds, and the request is retried
    once; a second failure propagates to the executor callback.

    :param img_url: image URL (surrounding whitespace is stripped).
    :param title: human-readable title used in failure logs / the un_done file.
    :param down_dir: target directory for the image and the un_done file.
    :raises ValueError: if the downloaded body is empty (0 bytes).
    """
    img_url = img_url.strip()
    image_name = img_url.split("/")[-1]
    target_path = os.path.join(down_dir, image_name)

    if os.path.exists(target_path):
        logger.info('img_url:%s  已存在' % (os.path.join(down_dir, image_name)))
        return

    try:
        get_request = requests.get(img_url, headers=common.header, timeout=60)
    except Exception:
        logger.error("{}  请求失败，请求时间是：{}，请求地址：{}".format(
            title, common.get_datetime('%Y/%m/%d %H:%M'), img_url))
        # Record the failed URL so get_from_dir() can retry it later.
        with open(down_dir + os.sep + 'un_done-' + common.get_datetime('%Y-%m-%d') + '.text',
                  'a+',
                  encoding='utf-8') as f:
            f.write(
                'title：%s ;  图片链接 - %s \n' % (title, img_url)
            )
        # Back off briefly, then retry once.
        time.sleep(5)
        get_request = requests.get(img_url, headers=common.header, timeout=60)

    image = get_request.content
    logger.info('%s 图片大小 : %i kb' % (image_name, len(image) / 1000))
    if len(image) == 0:
        # BUG FIX: the original concatenated str + bytes here, which raised
        # TypeError instead of the intended ValueError. Report the file name.
        raise ValueError("图片大小=0 ," + image_name)
    # Write via the full path instead of os.chdir(): chdir is process-wide
    # state and races with the other worker threads in the pool.
    with open(target_path, 'wb') as f:
        f.write(image)


def get_from_dir(file_list):
    """Re-download images listed in each un_done-style text file.

    Each non-empty line is expected to look like
    ``title：xxx ;  图片链接 - <url>``; the URL is the segment after the
    first ``"- "``. Downloads run on a small thread pool; a source file is
    deleted once every submitted future has completed.

    :param file_list: iterable of text-file paths (as written by down_img).
    """
    for item in file_list:
        logger.info(item)
        down_path, _ = os.path.split(item)
        # The directory name doubles as the title passed to down_img.
        title = down_path.split(os.sep)[-1]
        print(title)
        img_list = []
        with open(item, encoding='utf-8') as f:
            for num, value in enumerate(f, 1):
                line = value.strip('\n')
                if line == '':
                    logger.info('当前行为空：%i line' % num)
                    continue
                parts = line.split("- ")
                if len(parts) < 2:
                    # Robustness fix: a malformed line used to raise
                    # IndexError and abort the whole run; skip it instead.
                    logger.error('第 %i 行格式错误，已跳过: %s' % (num, line))
                    continue
                img_list.append(parts[1])
        if len(img_list) == 0:
            continue
        fs = []
        with futures.ThreadPoolExecutor(max_workers=min(5, len(img_list)),
                                        thread_name_prefix="down-thread") as executor:
            for img_url in img_list:
                submit = executor.submit(down_img, img_url, title, down_path)
                # Per-future callback logs/handles each download's outcome.
                submit.add_done_callback(common.executor_callback)
                fs.append(submit)
        done, not_done = futures.wait(fs, timeout=60)
        if len(img_list) == len(done):
            logger.info("{} 全部下载完毕!".format(item))
            os.remove(item)
            logger.info("{} 文件已删除!".format(item))


# 从分类目录下下载  例如 2021-05目录下的文件
def get_from_datecatory(file_list):
    """Download images listed in date-category files (e.g. under 2021-05).

    Only the first line of each file is read; every ``[https...]``
    bracketed segment in it yields one URL. Images go into a subdirectory
    named after the part of the filename before ``un_done``; the source
    file is deleted once all submitted downloads complete.
    """
    for entry in file_list:
        logger.info(entry)
        parent_dir, base_name = os.path.split(entry)
        folder = str(base_name).split('un_done')[0]

        with open(entry, encoding='utf-8') as fh:
            head = fh.readline()
        # Every '[https...]' segment contributes the text before its ']'.
        urls = [seg.split(']')[0] for seg in head.split('[') if seg.startswith('https')]
        print(urls)
        target = parent_dir + os.sep + folder + os.sep
        os.chdir(target)
        pending = []
        if len(urls) == 0:
            continue
        worker_count = min(5, len(urls))
        with futures.ThreadPoolExecutor(max_workers=worker_count,
                                        thread_name_prefix="down-thread") as pool:
            for link in urls:
                fut = pool.submit(down_img, link, folder, target)
                # Callback runs when each download finishes.
                fut.add_done_callback(common.executor_callback)
                pending.append(fut)
        finished, _unfinished = futures.wait(pending, timeout=60)
        if len(finished) == len(urls):
            logger.info("{} 全部下载完毕!".format(entry))
            os.remove(entry)
            logger.info("{} 文件已删除!".format(entry))


if __name__ == '__main__':
    # Module-level logger: the worker functions above read it as a global.
    logger = LoggerTool.get_logger(__name__)
    user_dir = common.get_user_dir()
    print(user_dir)
    source_root = user_dir + 'Pictures\\Camera Roll\\PORN'
    file_list = FileTool.get_file_list('text', source_root)
    get_from_dir(file_list)
