#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :test.py
# @Time      :2020/7/17 20:34
# @Author    :亮亮
# @说明       :
# @总结       :
import os
import random
import ssl
import time
from urllib.request import Request, urlopen

import requests
from fake_useragent import UserAgent
from lxml import etree



def get_html2(url):
    """Fetch *url* with a randomized User-Agent and proxy, retrying forever.

    Retries every 5 seconds until the request succeeds, so on a dead network
    this blocks indefinitely (callers rely on that best-effort behavior).

    :param url: URL to fetch.
    :return: the ``requests.Response`` object for the successful request.
    """
    headers = {
        "User-Agent": UserAgent().random,
        "Referer": "https://www.xsnvshen.com/album",
        "Cookie": "__cfduid=d35cd31104ed8b55733c4c266b398b67d1594986258"
    }

    # Pick one proxy at random per call.
    # NOTE(review): only an "http" key is defined, but the scraped URLs are
    # HTTPS — requests will NOT route HTTPS traffic through these proxies
    # unless an "https" entry is added. Confirm whether the proxy is needed.
    proxies = [{"http": "107.167.195.2"}, {"http": "119.82.253.175"}, {"http": "187.191.4.88"}]
    proxies = random.choice(proxies)

    # Retry loop: catch only requests' network-level errors so that
    # KeyboardInterrupt / SystemExit still abort the script (the previous
    # bare `except:` made the loop impossible to interrupt).
    while True:
        try:
            # verify=False skips TLS certificate validation (site sits behind
            # Cloudflare); this is insecure but intentional here.
            response = requests.get(url, headers=headers, verify=False, proxies=proxies)
            break
        except requests.exceptions.RequestException:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)

    return response





def main():
    """Download the first 10 images of one album into 测试文件夹/.

    Fetches 000.jpg .. 009.jpg sequentially, sleeping 6 s between images
    to stay polite to the server.
    """
    # url = "https://img.xsnvshen.com/album/19702/23861/000.jpg"
    url = "https://img.xsnvshen.com/album/20016/11983/"

    # Ensure the output directory exists; previously open() raised
    # FileNotFoundError on a fresh checkout.
    os.makedirs("测试文件夹", exist_ok=True)

    for i in range(10):
        # "00{}" only zero-pads correctly for single-digit i, which holds
        # for range(10); widen to "{:03d}" if the range ever grows.
        new_url = url + "00{}".format(i) + ".jpg"
        print(new_url)

        # Fetch ONCE and reuse the response; the original called
        # get_html2(new_url) a second time just to print it, downloading
        # every image twice.
        response = get_html2(new_url)
        with open("测试文件夹/{}".format(i) + ".jpg", "wb") as f:
            f.write(response.content)
        print(response)

        time.sleep(6)


if __name__ == '__main__':
    main()

