# -*- coding: utf-8 -*-

"""
DateTime   : 2021/04/06 18:12
Author     : ZhangYafei
Description: 
"""
import os
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urljoin

import requests
from lxml import etree
from prettytable import PrettyTable
from tqdm import tqdm
from zyf.timer import timeit

from utils import print_color

# Root directory for downloaded wallpapers; one sub-folder per category.
image_dir = 'images'

# Category number -> URL path segment on pic.netbian.com.
image_type_info = {
    1: '4kfengjing', 2: '4kmeinv', 3: '4kyouxi', 4: '4kdongman',
    5: '4kyingshi', 6: '4kmingxing', 7: '4kqiche', 8: '4kdongwu',
    9: '4krenwu', 10: '4kmeishi', 11: '4kzongjiao', 12: '4kbeijing',
}
# Category number -> human-readable label (menu text and folder name).
image_tip_info = {
    1: '4K风景', 2: '4K美女', 3: '4K游戏', 4: '4K动漫',
    5: '4K影视', 6: '4K明星', 7: '4K汽车', 8: '4K动物',
    9: '4K人物', 10: '4K美食', 11: '4K宗教', 12: '4K背景',
}

# Pending (img_url, filepath) download tasks, filled by get_image_data().
image_data_list = set()
# NOTE(review): these counters are updated from worker threads without a
# lock; CPython's GIL makes lost updates unlikely but not impossible.
search_count = 0
downloaded_count = 0
success_count = 0
history_file_path = 'history.txt'

os.makedirs(image_dir, exist_ok=True)

# Mode 'a+' creates the file when missing and keeps appends at the end.
# The handle stays open for the program's lifetime; download_images()
# appends one line per successfully saved image.
history_file = open(history_file_path, mode='a+', encoding='utf-8')
# 'a+' positions at EOF, so rewind before reading previously downloaded
# paths.  (open() above guarantees the file exists, which makes the old
# os.path.exists() guard redundant.)
history_file.seek(0)
downloaded_images = {line.strip() for line in history_file}

def init_tips_info():
    """Print the category menu as a four-column table, two categories per row."""
    table = PrettyTable(['序号1', '壁纸分类1', '序号2', '壁纸分类2'])
    pending = []
    for position, (number, label) in enumerate(image_tip_info.items(), start=1):
        pending += [number, label]
        if len(pending) == 4:
            table.add_row(pending)
            pending = []
        elif position == len(image_type_info):
            # Odd number of categories: pad the final half-filled row.
            table.add_row(pending + ['-', '-'])
    print(table)


def search_images(image_type: int, page_nums: int):
    """Collect download tasks for ``page_nums`` listing pages of one category.

    :param image_type: key of image_type_info, or 0 for the site's front page.
    :param page_nums:  number of listing pages to scan; pages are fetched in
                       parallel when more than one is requested.
    """
    url = 'https://pic.netbian.com'
    if image_type != 0:
        url = urljoin(url, image_type_info[image_type])
        image_type_desc = image_tip_info[image_type]
    else:
        image_type_desc = '最新壁纸'

    # BUG FIX: the original printed image_tip_info[image_type] before this
    # branch ran, which raised KeyError for the default choice image_type == 0.
    print(f'正在检索 - {image_type_desc} - 请稍等 - 马上开始下载')

    # makedirs(exist_ok=True) also creates image_dir itself when missing,
    # unlike the bare os.mkdir it replaces.
    os.makedirs(f'{image_dir}/{image_type_desc}', exist_ok=True)

    if page_nums > 1:
        with ThreadPoolExecutor(max_workers=page_nums) as pool:
            pool.map(get_image_data,
                     [url] * page_nums,
                     [image_type_desc] * page_nums,
                     range(1, page_nums + 1))
    else:
        get_image_data(url=url, image_type_desc=image_type_desc, page_num=page_nums)


def get_image_data(url: str, image_type_desc: str, page_num: int):
    """Scan one listing page and queue images that are not yet downloaded.

    Adds (img_url, filepath) pairs to the module-level image_data_list and
    bumps search_count / downloaded_count.  Runs in worker threads when
    several pages were requested.
    """
    global downloaded_count, search_count
    # Page 1 is the category root; later pages live at /index_<n>.html.
    page_url = url if page_num == 1 else f'{url}/index_{page_num}.html'
    # timeout keeps a stalled connection from hanging the worker forever.
    listing = etree.HTML(requests.get(page_url, timeout=30).content)
    for li in listing.xpath('//*[@id="main"]/div[3]/ul/li'):
        href = li.xpath('a/@href')[0]
        uid = href.rsplit('/', maxsplit=1)[1].replace('.html', '')
        title = li.xpath('a//img/@alt')[0].replace(' ', '_').replace(',', '-')
        # Fetch the detail page to resolve the full-resolution image URL.
        detail = etree.HTML(requests.get(urljoin(page_url, href), timeout=30).content)
        img_url = urljoin(page_url, detail.xpath('//a[@id="img"]/img/@src')[0])
        filepath = f'{image_dir}/{image_type_desc}/{title}_{uid}.jpg'
        if filepath in downloaded_images:
            downloaded_count += 1
        else:
            # NOTE(review): unlocked increments from several threads — a
            # threading.Lock around the counters would be strictly safer.
            search_count += 1
            image_data_list.add((img_url, filepath))


@timeit
def start(image_type: int, page_nums: int):
    """Search the chosen category, then download everything that was found."""
    search_images(image_type, page_nums)
    start_download(page_nums)


def start_download(workers: int):
    """Download all queued images, sharding across ``workers`` threads.

    Small batches (20 images or fewer) are downloaded sequentially; larger
    ones are split round-robin into ``workers`` shards driven by a pool.
    """
    # BUG FIX: guard against workers < 1 (the user may request 0 pages),
    # which would make ThreadPoolExecutor raise ValueError.
    workers = max(workers, 1)
    if len(image_data_list) > 20:
        shards = [set() for _ in range(workers)]
        for index, task in enumerate(image_data_list):
            shards[index % workers].add(task)
        with ThreadPoolExecutor(max_workers=workers) as pool:
            pool.map(download_images, shards)
    else:
        download_images(image_data_list)


def download_images(data_list: set = None):
    """Download every (url, filepath) task in ``data_list``.

    Each success is appended to the shared history file so later runs skip
    the image; failures are reported and skipped instead of aborting the
    whole batch.
    """
    global success_count
    if not data_list:
        return

    task_progress = tqdm(data_list, ncols=100)
    for index, (url, filepath) in enumerate(task_progress, start=1):
        task_progress.set_description(f"正在下载第 {index} 张图片")
        try:
            # timeout keeps one stalled download from hanging this shard.
            response = requests.get(url=url, timeout=30)
        except requests.RequestException as error:
            # BUG FIX: previously one network error killed the whole shard;
            # now the failed image is skipped and the rest continue.
            print(f'下载失败 {url}: {error}')
            continue
        with open(filepath, mode='wb') as f:
            f.write(response.content)
        # History is written only after the file landed on disk.
        # NOTE(review): several threads append here concurrently without a
        # lock; single short write() calls are atomic enough in practice.
        history_file.write(f'{filepath}\n')
        success_count += 1


def run():
    """Interactive entry point: prompt for category and page count, then download."""
    init_tips_info()

    # Category prompt: empty input means "latest wallpapers" (code 0).
    image_type = 0
    while True:
        choice = input('请选择下载的壁纸分类(序号,默认为最新壁纸) >>  ')
        if not choice:
            break
        if choice.isdecimal() and int(choice) in image_type_info:
            image_type = int(choice)
            break
        print('您输入的分类序号有误，请重新确认后输入')

    # Page-count prompt: empty input defaults to a single page.
    page_nums = 1
    while True:
        answer = input('请输入下载页数（整数，默认为1，每页20）>> ')
        if not answer:
            break
        if answer.isdecimal():
            page_nums = int(answer)
            break
        print('您输入的页数有误，必须为整数，请重新输入')

    start(image_type, page_nums)

    print_color(
        f'\n\n共检索到 {search_count + downloaded_count} 张图片, 之前已下载 {downloaded_count} 张, 还需下载 {search_count} 张, 本次下载成功 {success_count} 张')


if __name__ == '__main__':
    # Launch the interactive downloader only when executed as a script.
    run()
