import urllib.request
import urllib.parse
from lxml import etree
import os


def createPageUrl(first_page=1, last_page=244):
    """Build the full list of listing-page URLs to crawl.

    The original body was entirely commented out, so the function silently
    did nothing; it now returns the URLs it describes.  Fetching each page
    (via loadPage) is left to the caller.

    Args:
        first_page: first listing page number, inclusive (default 1).
        last_page: last listing page number, inclusive (default 244,
            matching the original hard-coded range(1, 245)).

    Returns:
        list[str]: one URL per listing page.
    """
    return [
        "https://www.2717.com/ent/meinvtupian/list_11_" + str(i) + ".html"
        for i in range(first_page, last_page + 1)
    ]


def loadPage(link):
    """Fetch one listing page and extract its image URLs and titles.

    The page is served in GBK encoding, hence the explicit decode.

    Bug fixed: the original line ended with a comma, which bound
    ``imglist`` to a *tuple of two parallel lists* ``(srcs, alts)`` instead
    of a list of (src, alt) pairs — iterating it yielded the two lists, not
    image URLs.  The two xpath results are now zipped into pairs.

    Args:
        link: URL of a listing page to scrape.

    Returns:
        list[tuple[str, str]]: (image URL, image title) pairs.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib.request.Request(link, headers=headers)
    html = urllib.request.urlopen(request).read().decode('gbk')
    content = etree.HTML(html)
    srcs = content.xpath('//div[@class="MeinvTuPianBox"]/ul/li/a/i/img/@src')
    alts = content.xpath('//div[@class="MeinvTuPianBox"]/ul/li/a/i/img/@alt')
    imglist = list(zip(srcs, alts))
    # for src, alt in imglist:
    #     writeImage(src)
    print(imglist)
    return imglist


def writeImage(link, use_proxy=True):
    """Download one image and save it under ./img/.

    Note: this installs a *global* urllib opener, so subsequent
    ``urllib.request.urlopen`` calls anywhere in the process go through
    the same (proxy) opener — preserved from the original behavior.

    Changes from the original:
    - ``proxyswitch`` was a hard-coded local constant (always True),
      making the no-proxy branch dead code; it is now the ``use_proxy``
      keyword argument with the same default, so existing callers are
      unaffected.
    - the filename is taken from the last 10 characters of the URL's
      *basename*; the original sliced the raw URL, which could include a
      ``/`` when the basename is short and then fail to open the file.

    Args:
        link: direct URL of the image to download.
        use_proxy: route the request through the hard-coded HTTP proxy
            (default True, matching the original proxyswitch).
    """
    # Handler routing HTTP traffic through a hard-coded proxy (IP:PORT).
    httpproxy_handler = urllib.request.ProxyHandler(
        {"http": "223.199.20.128:9999"})
    # Handler with no proxy configured.
    nullproxy_handler = urllib.request.ProxyHandler({})
    if use_proxy:
        opener = urllib.request.build_opener(httpproxy_handler)
    else:
        opener = urllib.request.build_opener(nullproxy_handler)
    # Install globally: every later urlopen() call uses this opener too.
    urllib.request.install_opener(opener)

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib.request.Request(link, headers=headers)
    # Raw image bytes.
    image = urllib.request.urlopen(request).read()

    # Make sure the target directory exists (no-op if already there).
    os.makedirs('./img/', exist_ok=True)

    # Last 10 characters of the URL's basename serve as the filename.
    filename = os.path.basename(link)[-10:]
    # Write the bytes out to local disk.
    with open(r"./img/" + filename, "wb") as f:
        f.write(image)
    print("已经成功下载 " + filename)


# Script entry point: build the set of listing-page URLs to crawl.
if __name__ == "__main__":
    createPageUrl()
