# -*- coding: utf-8 -*-
import requests                     # NOTE: this scraper is very aggressive — one run got the lab machine's IP banned for a day (the site's robots.txt allows crawling, but the request rate here is excessive)
from lxml import etree
from fake_useragent import UserAgent
import os, time, urllib.request

# Shared HTTP headers for every request: a randomized User-Agent makes the
# traffic look like it comes from an ordinary browser session.
headers = {"User-Agent": UserAgent().random}


def FileSave(save_path, img_urls):
    """Download every image URL in ``img_urls`` into ``save_path``.

    Files are named ``0.ico``, ``1.ico``, ... in list order.  A short pause
    between downloads keeps the request rate polite (the site bans
    aggressive crawlers).

    :param save_path: target directory; created (with parents) if missing.
    :param img_urls: iterable of direct image URLs.
    """
    # makedirs handles nested paths and is a no-op when the dir exists
    # (the original os.mkdir failed on missing parents).
    os.makedirs(save_path, exist_ok=True)
    for n, ico in enumerate(img_urls):
        file_name = os.path.join(save_path, str(n) + ".ico")
        time.sleep(0.5)  # throttle between downloads
        try:
            # Fetch the image and write it to the local file.
            urllib.request.urlretrieve(ico, file_name)
        except (OSError, ValueError) as e:
            # Network/HTTP/file errors (urllib errors subclass OSError);
            # report and keep going so one bad URL doesn't abort the batch.
            print("error downloading %s: %s" % (ico, e))


def Page_Level(myPage):  # Level-1 page: collect channel URLs, then descend.
    """Parse the listing page HTML and return all icon URLs found on its
    channel pages (delegates the per-channel crawl to ``Page_Level2``)."""
    tree = etree.HTML(myPage)
    # Channel links sit in the menu list of the 6th <div> under <body>.
    hrefs = tree.xpath('/html/body/div[6]/div[2]/ul/li/div/a/@href')
    channel_urls = ['http://ico.58pic.com' + str(href) for href in hrefs]
    return Page_Level2(channel_urls)


def Page_Level2(channel_urls):
    """Level-2 pages: fetch each channel page and collect icon image URLs.

    NOTE(review): parameter renamed from the garbled ``myPzip_name_urlsage``;
    the only in-file caller (``Page_Level``) passes it positionally, so
    behavior is unchanged.

    :param channel_urls: iterable of channel page URLs.
    :return: list of absolute icon image URLs.
    """
    img_urls = []
    for url in channel_urls:
        time.sleep(1)  # throttle between page fetches
        # Fetch and decode the channel page's HTML source.
        page = requests.get(url, headers=headers).content.decode("utf-8")
        tree = etree.HTML(page)
        # Each icon entry exposes its image URL in the @rel attribute.
        rels = tree.xpath('//div[@class="box_main icon_list"]/ul/li/dl/dt[2]/label/a[1]/@rel')
        img_urls.extend('http://ico.58pic.com' + str(rel) for rel in rels)
    return img_urls


def spider(save_path, url):
    """Crawl one listing page: fetch it, extract all icon URLs via the
    two page levels, and download them into ``save_path``."""
    response = requests.get(url, headers=headers)
    listing_html = response.content.decode("utf-8")  # raw HTML source
    icon_urls = Page_Level(listing_html)
    FileSave(save_path, icon_urls)


if __name__ == "__main__":
    start = time.time()
    print('start......')
    for number in range(1,5):
        start_url = r"http://ico.58pic.com/zhuantituijian/index/" + str(number) + ".html"
        save_path = 'H:\\新建文件夹\\图标' + str(number) + '\\'  # 保存内容
        spider(save_path,start_url)
    print('end')
    end = time.time()
    print('爬虫运行时间为%.4f秒' % (end - start))
