import urllib.request
import urllib.parse
from lxml import etree
import os

def createPageUrl(first_page=1, last_page=16):
    """Generate the listing-page URLs to crawl and fetch each one.

    Args:
        first_page: first page number to crawl (inclusive). Defaults to 1,
            matching the original hard-coded range.
        last_page: last page number to crawl (inclusive). Defaults to 16.

    Side effects:
        Calls loadPage() for every page, which downloads images to ./img/.
    """
    # range() upper bound is exclusive, so +1 keeps last_page inclusive.
    for page in range(first_page, last_page + 1):
        loadPage("http://www.bbsnet.com/page/" + str(page))

def loadPage(link):
    """Fetch one listing page and download every GIF image found on it.

    Args:
        link: URL of a listing page (e.g. http://www.bbsnet.com/page/1).

    Side effects:
        Issues an HTTP GET for the page, then calls writeImage() for each
        image URL extracted, which writes files under ./img/.
    """
    # Spoof a desktop browser UA so the site serves the normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib.request.Request(link, headers=headers)
    html = urllib.request.urlopen(request).read().decode('utf8')
    content = etree.HTML(html)
    # Collect the src of every GIF on the current page.
    imglist = content.xpath(
        '//ul[@class="masonry clearfix"]/li/div/a/img/@src')
    # NOTE: loop variable renamed so it no longer shadows the `link` parameter.
    for img_url in imglist:
        writeImage(img_url)


def writeImage(link):
    """Download a single image and save it into ./img/.

    Args:
        link: direct URL of the image file.

    Side effects:
        Issues an HTTP GET for the image, creates ./img/ if missing, and
        writes the raw bytes to ./img/<basename-of-url>.
    """
    # Spoof a desktop browser UA so the site serves the file.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    request = urllib.request.Request(link, headers=headers)
    # Raw image bytes.
    image = urllib.request.urlopen(request).read()

    # Ensure the output directory exists (no-op if already created).
    os.makedirs('./img/', exist_ok=True)

    # Use the basename of the URL path as the file name. The original
    # link[-10:] slice could contain '/' (e.g. ".../b/img.gif"), which
    # would make open() fail; basename is always a plain file name.
    filename = os.path.basename(urllib.parse.urlsplit(link).path)

    # Write the bytes to local disk.
    with open(os.path.join('./img', filename), "wb") as f:
        f.write(image)
    print("已经成功下载 " + filename)


if __name__ == "__main__":
    # Entry point: crawl the default page range and download all GIFs.
    createPageUrl()
