import os
import random
import time

import parsel
import requests

time_start = time.time()
time_slept = 0

# Rotate through several User-Agent strings so the server is less likely to
# reject repeated automated requests.  Built once, outside the page loop
# (the original rebuilt this list on every iteration).
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 "
    "Safari/537.36 Edg/92.0.902.78",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; ) Gecko/20100101 Firefox/61.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
]

# Raw string avoids the invalid '\D' / '\T' escape sequences the original
# path literal relied on; create the directory up front so open() cannot fail.
SAVE_DIR = r'E:\DownLoad\Test'
os.makedirs(SAVE_DIR, exist_ok=True)

for page in range(1, 10):
    # 1. Target listing URL + request headers for this page.
    base_url = ('https://wall.alphacoders.com/popular.php'
                '?lang=Chinese&quickload=2119&page={}'.format(page))
    # Pick a random User-Agent per page so the scraper is harder to fingerprint.
    headers = {'User-Agent': random.choice(USER_AGENT_LIST)}
    # sleep = round(random.uniform(0, 3), 1)
    # time.sleep(sleep)  # optional random delay between requests
    # time_slept += sleep

    # 2. Fetch the listing page.  A timeout prevents the script from hanging
    # forever on a stalled connection; raise_for_status surfaces HTTP errors.
    listing_response = requests.get(url=base_url, headers=headers, timeout=30)
    listing_response.raise_for_status()

    # 3. Parse the listing into a parsel Selector and collect every
    # wallpaper detail-page link on the page.
    html_data = parsel.Selector(listing_response.text)
    detail_links = html_data.xpath(
        '//div[@class="thumb-container-big "]//div[@class="boxgrid"]/a/@href'
    ).extract()

    # Walk each detail page and download the full-size image.
    for item in detail_links:
        sub_html = requests.get(
            url='https://wall.alphacoders.com' + item,
            headers=headers, timeout=30).text
        sub_selector = parsel.Selector(sub_html)
        img_link = sub_selector.xpath(
            '//div[@class="center img-container-desktop"]/a/@href'
        ).extract_first()
        # extract_first() returns None when the layout changes or the
        # element is missing — skip instead of crashing on .split().
        if not img_link:
            print('no image link found on ' + item + ', skipping')
            continue

        # Parameters required by the download-link API, recovered from the
        # image URL (e.g. https://images4.alphacoders.com/.../123456.jpg):
        # host prefix -> image_server, file name -> content_id + file_type.
        img_slice = img_link.split('/')
        image_server = img_slice[2].split('.')[0]
        name_parts = img_slice[-1].split('.')
        content_id = name_parts[0]
        file_type = name_parts[1]

        post_url = 'https://api.alphacoders.com/content/get-download-link'
        payload = {'content_id': content_id, 'content_type': 'wallpaper',
                   'file_type': file_type, 'image_server': image_server}
        api_response = requests.post(post_url, data=payload, timeout=30)
        api_response.raise_for_status()
        # Parse the JSON body properly instead of stringifying the raw bytes
        # and slicing on quote characters (the old approach broke on any key
        # reordering or whitespace change in the response).
        # NOTE(review): key assumed to be 'download_link' — the original code
        # extracted a quoted URL value and stripped JSON '\/' escapes, which
        # matches such a field; confirm against the live API response.
        download_url = api_response.json().get('download_link', '')
        if not download_url:
            print('no download link for content ' + content_id + ', skipping')
            continue

        download_data = requests.get(
            url=download_url, headers=headers, timeout=60).content
        print("download url= " + download_url)

        # 4. Save the image bytes to disk; report success only after the
        # write actually happened (the original printed before writing).
        file_name = content_id + '.' + file_type
        with open(os.path.join(SAVE_DIR, file_name), mode='wb') as f:
            f.write(download_data)
        print('save ' + file_name + ' successful')

    print("---------------group " + str(page) + "'s downLoad has finished!---------------")

print("-----------------------------all group's downLoad has finished!-----------------------------")
time_end = time.time()
print('It takes', round((time_end - time_start), 1), 'seconds')
print('Sleep took', round(time_slept, 1), 'seconds')
