import gevent  # gevent coroutine/concurrency library
import grequests  # concurrent requests built on gevent
import requests  # NOTE: must be imported AFTER gevent/grequests (monkey patching order)
import time
import random
import parsel  # HTML parsing library (XPath/CSS selectors)
from tqdm import tqdm  # progress bar module


def parallel_get_url(link):
    """Scrape one gallery page and queue every wallpaper's download link.

    Visits ``link``, extracts the sub-page paths, fetches them concurrently,
    builds the download-API query string for each image and finally appends
    the server-returned download URLs to the global ``download_list``.

    Side effects: rebinds the global ``amount`` to the number of sub pages
    and drives the global ``pbar`` progress bar.
    """
    global amount  # number of images found on this page; read by auto_run()
    # Rotate the User-Agent on every page request to look less bot-like.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 "
        "Safari/537.36 Edg/92.0.902.78",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; ) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    ]
    headers = {'User-Agent': random.choice(user_agent_list)}
    # --- main gallery page ---
    pbar.update(3)
    pbar.set_description_str(" visting original webpage  ")
    response = requests.get(url=link, headers=headers)
    gallery = parsel.Selector(response.text)
    sub_paths = gallery.xpath('//div[@class="thumb-container-big "]//div[@class="boxgrid"]/a/@href').extract()
    # --- sub pages, fetched concurrently ---
    raw = parallel_sub_url(sub_paths)
    amount = len(raw)
    pbar.update(5)
    pbar.set_description_str(" get sub webpage list over ")
    download_data = []
    for sub_response in raw:
        sub_page = parsel.Selector(sub_response.text)
        img_link = sub_page.xpath('//div[@class="center img-container-desktop"]/a/@href').extract_first()
        pieces = img_link.split('/')
        # Pull the query parameters the download API expects out of the URL:
        # host prefix -> image_server, "<id>.<ext>" tail -> content_id / file_type.
        image_server = pieces[2].split('.')[0]
        content_id = pieces[-1].split('.')[0]
        file_type = pieces[-1].split('.')[1]
        download_data.append(
            '?content_id=' + content_id + '&content_type=wallpaper' + '&file_type='
            + file_type + '&image_server=' + image_server)
    # --- ask the API for the real download URLs, concurrently ---
    res = parallel_download_url(download_data)
    pbar.update(2)
    pbar.set_description_str("  download request data get")
    for reply in res:  # extract the URL field from each JSON-ish API response body
        download_list.append(str(reply.content).split("'")[1].split('"')[7].replace("\\", ""))


def parallel_sub_url(paths):  # fetch the sub pages concurrently
    """Fetch all wallpaper sub pages concurrently.

    ``paths`` is an iterable of site-relative hrefs (e.g. ``/big.php?i=...``).
    Returns the list of responses from ``grequests.map`` — entries may be
    ``None`` if an individual request failed.
    """
    # NOTE: renamed the parameter from `list`, which shadowed the builtin.
    tasks = [grequests.get('https://wall.alphacoders.com' + u) for u in paths]
    # size=30 caps the number of concurrent greenlets.
    return grequests.map(tasks, size=30)


def parallel_download_url(queries):  # request the download links concurrently
    """POST each prepared query string to the download-link API concurrently.

    ``queries`` is an iterable of strings like
    ``?content_id=...&content_type=wallpaper&file_type=...&image_server=...``.
    Returns the list of responses from ``grequests.map`` — entries may be
    ``None`` if an individual request failed.
    """
    # NOTE: renamed the parameter from `list`, which shadowed the builtin.
    tasks = [grequests.post('https://api.alphacoders.com/content/get-download-link' + d) for d in queries]
    # size=30 caps the number of concurrent greenlets.
    return grequests.map(tasks, size=30)


def download(url):
    """Fetch a single wallpaper URL and stash the response.

    Appends the response object to the global ``download_get`` list and
    advances the global progress bar. Run inside a gevent greenlet.
    """
    # No `global` statements needed: we only mutate these objects,
    # never rebind the module-level names.
    reply = requests.get(url=url)
    download_get.append(reply)
    pbar.update(2)


def parallel_download():  # concurrent execution via gevent
    """Spawn one greenlet per queued URL and block until all finish.

    Reads the module-level ``download_list``; results accumulate in
    ``download_get`` via :func:`download`. Always returns 0.
    """
    jobs = [gevent.spawn(download, target) for target in download_list]
    gevent.joinall(jobs)
    return 0


def auto_run(num):
    """Scrape and download ``num`` pages of popular wallpapers.

    For each page: collect the download links, fetch all images
    concurrently, then write them to disk, driving a tqdm progress bar
    throughout. Prints a per-page timing summary.
    """
    global download_list
    global download_get
    global amount
    global pbar
    for i in range(0, num):
        pbar = tqdm(total=100, colour='#278fff')  # fresh progress bar per page
        pbar.ncols = 100  # progress bar width
        pbar.mininterval = 0.1  # minimum refresh interval
        download_list = []
        # BUG FIX: download_get must be reset per page. Previously it kept
        # growing across pages, so pages after the first re-saved the stale
        # responses at indices 0..amount-1 instead of the new page's images.
        download_get = []
        parallel_get_url(  # target gallery page URL
            'https://wall.alphacoders.com/popular.php?lang=Chinese&page={}'.format(str(i + 1)))
        pbar.set_description_str("beginning parallel download")
        start = time.time()
        parallel_download()  # concurrent image download
        pbar.set_description_str("download over,write to file")
        for j in range(0, amount):
            # Build "<content_id>.<ext>" from fixed positions in the final URL.
            url_parts = download_get[j].url.split('/')
            file_name = url_parts[5] + '.' + url_parts[7]
            # Properly escaped backslashes (old literal relied on invalid
            # escape sequences \D and \w); same path as before.
            with open('E:\\DownLoad\\wallpapers\\' + file_name, mode='wb') as f:
                pbar.update(1)
                f.write(download_get[j].content)
        end = time.time()
        print("\n=======================================download sub page [" + str(i + 1) + "] finished takes " + str(
            round(end - start)) + 's======================================\n')
        pbar.close()


# Shared scraper state. These must stay at module level because the worker
# functions rebind/mutate them via `global` statements.
download_list = []  # download URLs queued for the current page
download_get = []  # responses collected by download()
pbar = None  # current tqdm progress bar (created per page in auto_run)
amount = 0  # total number of download links on the current page

if __name__ == '__main__':
    # Guarded entry point so importing this module does not start the crawl.
    t1 = time.time()
    auto_run(2)  # start the crawler; argument is the number of pages
    t2 = time.time()
    print("\n===================================all page's downLoad has finished！it takes " + str(round((t2 - t1), 1))
          + 's' + "===================================")
