import random
from urllib.request import Request, ProxyHandler, build_opener, HTTPHandler, install_opener
from urllib.request import urlopen
from urllib.error import URLError
import os
import time
from lxml import etree

class DesginSpider(object):
    """Crawler for bz.cndesign.com wallpaper list pages.

    Walks the paginated list pages starting from a given URL, opens each
    detail page it links to, and downloads that page's images into a
    per-title directory under ``self.dir``.
    """

    def __init__(self):
        # Browser-like headers to avoid trivial bot blocking.
        self.head = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Connection': 'keep-alive',
            # Header values should be strings; the original passed the
            # int 1 and relied on http.client coercing it.
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
        }
        self.host = 'http://bz.cndesign.com'
        self.dir = 'E:\\designImg\\'

    def read_ip(self):
        """Return a random ``'ip:port'`` proxy address from ../file/ip.txt.

        Each line of the file is expected to be ``ip<TAB>port``.
        Fixes vs. original: the file handle is now closed (``with``), and
        ``random.choice`` replaces ``randint(1, len(lines))`` which could
        raise IndexError (index == len) and never picked the first line.
        """
        with open('../file/ip.txt', 'r') as file:
            lines = file.readlines()
        ip = random.choice(lines).split('\t')
        return ip[0] + ':' + ip[1].strip()

    # Crawl a list page, harvest its detail pages, then follow pagination.
    def get_next_url(self, url):
        """Crawl list pages iteratively, starting at *url*.

        For every list page: fetch each detail page, extract its image
        URLs and title, and hand them to :meth:`down_pictures`; then
        follow the "next page" link until there is none.

        Fixes vs. original:
        - iteration instead of recursion (no RecursionError on long runs);
        - ``next_list[1]`` is guarded (a lone link raised IndexError,
          which the ``URLError`` handler did not catch);
        - the last page's detail pages are now crawled too (the original
          skipped them because the child crawl was nested inside the
          "has next page" branch).
        """
        while url:
            print('爬取url:%s' % url)
            try:
                request = Request(headers=self.head, url=url)
                time.sleep(0.5)  # be polite: throttle list-page requests

                # proxy = ProxyHandler({'http': self.read_ip()})
                # opener = build_opener(proxy, HTTPHandler)
                # install_opener(opener)

                respon = urlopen(request, timeout=10).read().decode('utf-8')
                html = etree.HTML(respon)

                # "Next page" link in the pagination bar; relative href.
                next_list = html.xpath("//div[@class='paging_box']//ul[@class='paging_ul']//li[@class='paging_lists']/a[@class='paging_a paging_bg']/@href")
                next_url = self.host + next_list[1] if len(next_list) > 1 else None

                # Detail-page URLs on this list page.
                chlid_url = html.xpath(".//div[@class='content_box']//ul[@class='c-l_ul clear']//li/div[@class='pl_img_box']/a/@href")
                for c_url in chlid_url:
                    c_request = Request(headers=self.head, url=c_url)
                    c_respon = urlopen(c_request).read().decode('utf-8')
                    c_html = etree.HTML(c_respon)

                    # Image addresses on the detail page.
                    img_list = c_html.xpath(".//div[@class='detail_box']//p[@class='crop_img']/img/@src")

                    # Page title, used as the download directory name;
                    # None when the page carries no <h2> title.
                    title = c_html.xpath(".//div[@class='detail_box']//h2/text()")
                    title_s = title[0] if title else None

                    self.down_pictures(title_s, img_list)

                url = next_url
            except URLError as e:
                # Report the failure and stop the crawl, matching the
                # original's behavior of not retrying.
                if hasattr(e, 'code'):
                    print(e.code)
                if hasattr(e, 'reason'):
                    print(e.reason)
                return

    # Download images into a per-title directory.
    def down_pictures(self, title, img_list):
        """Download every URL in *img_list* into ``self.dir + title``.

        Does nothing when *img_list* is empty or *title* is None (no
        usable directory name). Any failure is printed and aborts the
        remaining downloads of this batch, as in the original.

        Fixes vs. original: image files are closed via ``with`` (they
        leaked before), and ``os.makedirs(..., exist_ok=True)`` replaces
        ``os.mkdir``, which raised FileExistsError on a re-visited title
        and skipped all of its downloads.
        """
        if not img_list or title is None:
            return
        try:
            os.makedirs(self.dir + title, exist_ok=True)
            for url in img_list:
                img_name = str(url).split("/")[-1]
                print('目录名:%s,文件名:%s' % (title, img_name))
                with open(self.dir + title + '\\' + img_name, 'wb') as file:
                    file.write(urlopen(url=url).read())
        except Exception as e:
            print(e)


if __name__ == '__main__':
    # Entry point: crawl from the first wallpaper list page.
    # get_next_url returns None, so the original print() around the call
    # only ever emitted "None"; invoke the method directly instead.
    s = DesginSpider()
    s.get_next_url('http://bz.cndesign.com/list_13.html')