import grequests
import time
import requests
import random
import parsel
import os


def parallel_get_url(link):
    """Scrape one listing page and collect final wallpaper download URLs.

    Visits *link*, extracts every thumbnail's sub-page, fetches those
    sub-pages concurrently, builds the query strings for the download-link
    API and appends the resulting direct URLs to the module-level
    ``download_list``.

    :param link: URL of a mobile.alphacoders.com listing page.
    """
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 "
        "Safari/537.36 Edg/92.0.902.78",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; ) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    ]
    # Randomize the User-Agent per page to look less like a bot.
    headers = {'User-Agent': random.choice(user_agent_list)}
    # Main listing page
    print(">>> visting main page")
    response = requests.get(url=link, headers=headers)
    selector = parsel.Selector(response.text)
    # Relative links to the per-wallpaper sub-pages
    sub_links = selector.xpath('//div[@class="thumb-element"]/a/@href').extract()
    sub_responses = parallel_sub_url(sub_links)
    print(">>> get sub page list over")
    download_data = []
    for sub in sub_responses:
        if sub is None:
            # grequests.map yields None for requests that failed - skip them
            continue
        sub_selector = parsel.Selector(sub.text)
        img_link = sub_selector.xpath('//div[@class="center"]/a/@href').extract_first()
        if not img_link:
            # Sub-page without the expected download anchor - skip instead of
            # crashing on None.split below.
            continue
        # Last path segment looks like "<content_id>.<file_type>"; both parts
        # become query parameters of the download-link API.
        name_parts = img_link.split('/')[-1].split('.')
        content_id = name_parts[0]
        file_type = name_parts[1]
        download_data.append(
            '?content_id=' + content_id + '&content_type=mobile'
            + '&file_type=' + file_type)
    res = parallel_download_url(download_data)
    print(">>> collect data for download url request is finished!")
    for api_response in res:
        if api_response is None:
            continue
        # NOTE(review): brittle raw-text parsing of the API response body;
        # assumes the 8th '"'-delimited field is the download link with
        # escaped slashes - consider api_response.json() instead (verify
        # the response schema first).
        download_list.append(
            str(api_response.content).split("'")[1].split("\"")[7].replace("\\", ""))


def parallel_sub_url(links):
    """Fetch all wallpaper sub-pages concurrently.

    :param links: iterable of relative URL paths on mobile.alphacoders.com.
        (Renamed from ``list`` to stop shadowing the builtin.)
    :return: list of responses in the same order as *links*; failed
        requests appear as None (grequests.map behavior).
    """
    tasks = [grequests.get('https://mobile.alphacoders.com' + u) for u in links]
    # Cap concurrency at 30 simultaneous connections
    return grequests.map(tasks, size=30)


def parallel_download_url(queries):
    """Request the direct download link for each wallpaper concurrently.

    :param queries: iterable of query strings ("?content_id=...&...") for the
        get-download-link API. (Renamed from ``list`` to stop shadowing the
        builtin.)
    :return: list of responses in the same order as *queries*; failed
        requests appear as None (grequests.map behavior).
    """
    tasks = [grequests.post('https://api.alphacoders.com/content/get-download-link' + d)
             for d in queries]
    # Cap concurrency at 30 simultaneous connections
    return grequests.map(tasks, size=30)


def parallel_download():
    """Download every URL currently held in the module-level ``download_list``.

    :return: list of responses in the same order as ``download_list``.
    """
    pending = [grequests.get(url) for url in download_list]
    # At most 30 transfers run at the same time.
    return grequests.map(pending, size=30)


def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    :param path: directory path to ensure.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs()
    os.makedirs(path, exist_ok=True)


def auto_run(num):
    """Crawl *num* listing pages and save every wallpaper to disk.

    :param num: number of listing pages to process (pages 1..num).
    """
    global time_slept
    global download_list
    # Raw string: "E:\DownLoad\phone" relied on '\D' and '\p' not being
    # recognized escapes, which is a DeprecationWarning in modern Python.
    file_path = r"E:\DownLoad\phone"
    for page in range(1, num + 1):
        download_list = []
        parallel_get_url(
            'https://mobile.alphacoders.com/by-device/635/iPhone-12-Pro-Wallpapers/?page={}&quickload=1'.format(
                page))
        print("begin parallel download...")
        # sleep = round(random.uniform(1, 3), 1)
        # print("waiting " + str(sleep) + "s")
        # time.sleep(sleep)  # randomized delay between fetches (disabled)
        # time_slept += sleep
        start = time.time()
        responses = parallel_download()
        mkdir(file_path)  # make sure the target folder exists
        for resp in responses:
            if resp is None:
                # grequests.map returns None for failed downloads - skip
                continue
            url_parts = resp.url.split('/')
            # Assumes URL path segments 5 and 6 are "<content_id>" and
            # "<file_type>" - TODO confirm against the CDN URL layout.
            file_name = url_parts[5] + '.' + url_parts[6]
            with open(os.path.join(file_path, file_name), mode='wb') as f:
                print('save ' + file_name + " success")
                f.write(resp.content)
        end = time.time()
        print("========================download sub page [" + str(page) + "] takes " + str(round((end - start), 1)) +
              's========================')


# Entry point: guard the crawl so importing this module does not trigger
# a 10-page download as a side effect.
if __name__ == "__main__":
    t1 = time.time()
    time_slept = 0  # accumulated sleep time (sleeping is currently disabled)
    download_list = []  # filled by parallel_get_url, consumed by parallel_download
    auto_run(10)
    t2 = time.time()
    print("=======================all page's downLoad has finished！It takes " + str(round((t2 - t1 - time_slept), 1)) +
          's' + "=======================")
    # print("slept: " + str(time_slept) + "s")
