import os
import random
import re
from concurrent.futures import ThreadPoolExecutor

import requests
from lxml import etree


def download(url):
    """Download every wallpaper thumbnail listed on one netbian.com index page.

    Fetches *url* through a randomly chosen HTTP proxy, parses the GBK-encoded
    page for ``<img>`` sources and titles, and saves each image to
    ``img/<title>.jpg``.  A failure on one image is reported and skipped so a
    single bad link does not abort the whole page.

    :param url: absolute URL of a list page, e.g. ``.../mei/index_2.htm``
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36'
    }
    # NOTE(review): these free proxies are almost certainly stale by now —
    # replace with a maintained pool, or drop `proxies=` to connect directly.
    proxy_pool = [
        '60.169.95.106:1133',
        '115.218.3.49:9000',
        '115.218.4.166:9000',
        '114.104.139.200:9005',
    ]
    chosen = random.choice(proxy_pool)
    proxies = {
        'http': 'http://' + chosen,
        'https': 'https://' + chosen,
    }
    print(url)
    try:
        # Timeout prevents a dead proxy from hanging the worker thread forever.
        res = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        res.raise_for_status()  # bail out on 4xx/5xx instead of parsing junk
    except requests.RequestException as exc:
        print('failed to fetch %s: %s' % (url, exc))
        return
    print(res.status_code)
    # The site serves GBK; ignore undecodable bytes rather than crash.
    html = etree.HTML(res.content.decode('gbk', errors='ignore'))
    hrefs = html.xpath("//div[@class='list']/ul/li/a/img/@src")
    titles = html.xpath("//div[@class='list']/ul/li/a/img/@alt")
    print(hrefs)
    os.makedirs('img', exist_ok=True)  # don't crash when img/ is missing
    for href, title in zip(hrefs, titles):
        print(href)
        # Strip characters that are illegal in filenames (esp. on Windows).
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)
        try:
            img_bytes = requests.get(href, headers=headers, timeout=10).content
        except requests.RequestException as exc:
            print('failed to download %s: %s' % (href, exc))
            continue
        # Open the file only after the download succeeded, so a failed
        # request never leaves an empty .jpg behind.
        with open('img/' + safe_title + '.jpg', 'wb') as f:
            f.write(img_bytes)
        print(title, "------下载完成------")

if __name__ == '__main__':
    # The first page is index.htm; pages 2..17 follow the index_N.htm pattern.
    base_url = 'http://www.netbian.com/mei/index.htm'
    # Renamed from `list`, which shadowed the builtin.
    page_urls = [base_url] + [
        'http://www.netbian.com/mei/index_%d.htm' % n for n in range(2, 18)
    ]
    # Fan the 17 page downloads out over the default-sized thread pool;
    # the `with` block waits for all submitted pages to finish.
    with ThreadPoolExecutor() as pool:
        for page_url in page_urls:
            pool.submit(download, page_url)
