# -*- coding: utf-8 -*



import urllib .request  # 导入用于打开URL的扩展库模块
import urllib .parse
from bs4 import BeautifulSoup
import re  # 导入正则表达式模块
from lib.mylib  import tool #引入工具类

# NOTE(review): this rebinds the imported ``tool`` class to an instance and
# shadows the class name for the rest of the module — downlImg relies on this
# module-level instance. Renaming would be cleaner but touches callers.
tool = tool() # instantiate the download helper
def open_url(url):
    """
    Fetch a page and return its parsed DOM.

    Sends a GET request with a browser-like User-Agent header (some sites
    reject the default urllib agent), reads the body and parses it with
    BeautifulSoup's lxml parser.

    :param url: absolute URL of the page to fetch
    :return: BeautifulSoup document for the page
    :raises urllib.error.URLError: if the request fails
    """
    req = urllib.request.Request(url)
    # Pretend to be a desktop browser so the site serves the normal page.
    req.add_header(
        'User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0')
    # Context manager closes the HTTP response; the original leaked it.
    with urllib.request.urlopen(req) as page:
        html = page.read()
    return BeautifulSoup(html, 'lxml')
    

def url_list(html):
    """
    Walk every <article> element in the page body and hand each one to
    handleUrl for link extraction and download.

    :param html: BeautifulSoup document returned by open_url
    """
    # Dropped the unused local ``list`` from the original — it was never
    # populated and shadowed the builtin.
    for article in html.body.find_all('article'):
        handleUrl(article)
      
def handleUrl(res):
    '''
    Extract (link, name) pairs from one <article> fragment and download
    the first match.

    :param res: BeautifulSoup tag; stringified before regex matching
    '''
    pattern = re.compile(r'<a href="(.*?)">(.*?)</a>')
    urls = re.findall(pattern, str(res))
    # Guard against articles with no matching anchor — the original
    # indexed urls[0] unconditionally and raised IndexError.
    if urls:
        downlImg(urls[0])

def downlImg(urllist):
    '''
    Download every image of one gallery page.

    :param urllist: (page_url, gallery_name) tuple as produced by the
        regex in handleUrl
    '''
    print("开始下载")    
    # Unpack the tuple directly instead of round-tripping it through
    # str()/split(",")/hard-coded slice offsets, which corrupted the URL
    # or the name whenever either contained a comma.
    imgUrl, imgName = urllist
    imgHtml = open_url(imgUrl)
    pattern = re.compile(r'<.*?src="(.*?)".*?')
    # Image tags on this site carry referrerpolicy="no-referrer";
    # use that attribute to select them.
    htmlP = imgHtml.body.find_all(attrs={"referrerpolicy": "no-referrer"})
    imgUrlList = re.findall(pattern, str(htmlP))
    for index, imgSrc in enumerate(imgUrlList):
        tool.downloadImg("data/" + imgName, imgSrc, index)
    print("下载完成")    

if __name__ == '__main__':
    # Entry point: fetch the wallpaper category index and process
    # every article it contains.
    start_url = "http://acg17.com/category/meitu/pixiv-wallpaper/"
    url_list(open_url(start_url))
