#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author：albert time:2019/8/16
import  requests
from lxml import  etree
import  re
from urllib.request import urlretrieve
import  random,threading
import  time

# Shared mutable state used by the classes below.
dict = {}  # NOTE(review): shadows the builtin `dict`; maps category title -> listing URL (filled by img_Url.Total_label)
img_url = []  # accumulates every scraped image src URL (filled by pages.get_paging, consumed by pages.extract)

class R(object):
    '''Supply a randomised request header so scraping looks less uniform.'''

    def getHeaders(self):
        '''Return a headers dict with a randomly chosen User-Agent.

        NOTE: callers invoke this unbound as ``R.getHeaders(url)``; the
        ``self`` argument is never used, so that works.
        '''
        # FIX: the original list was missing a comma after the first entry,
        # which implicitly concatenated the first two strings into one
        # malformed User-Agent.  Backslash continuations inside brackets
        # were also unnecessary and have been dropped.
        user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        ]
        UserAgent = random.choice(user_agent_list)
        headers = {'User-Agent': UserAgent}
        return headers
class rule(object):
    '''Fetch a page and parse its source into an lxml HTML tree.'''

    def re_rule(self):
        '''Download the page at the given URL and return an etree.HTML tree.

        NOTE: callers invoke this unbound as ``rule.re_rule(url)``, so
        ``self`` is really the URL string being fetched.
        '''
        headers = R.getHeaders(self)
        response = requests.get(self, headers=headers)
        # Pause between requests to go easy on the server.
        time.sleep(3)
        page_source = response.content.decode('utf-8')
        return etree.HTML(page_source)

class img_Url(object):
    '''Resolve a wallpaper category title to its listing URL and page count.'''

    def __init__(self, num):
        # num is a category title string (e.g. '电脑壁纸'), despite the name.
        self.Base_url = 'http://www.umei.cc/bizhitupian/'
        self.num = num

    def Total_label(self):
        '''Scrape the category index, record every title -> URL pair in the
        module-level ``dict``, and return (page_count, category_url) for the
        selected category.

        Raises KeyError if self.num is not a title found on the index page.
        '''
        html = rule.re_rule(self.Base_url)
        title = html.xpath("//div[@class='w850 l oh']//a/@title")
        url = html.xpath("//div[@class='w850 l oh']//a/@href")
        # Pair each title with its URL; zip replaces the manual index counter.
        for name, link in zip(title, url):
            dict[name] = link
        url = dict[self.num]
        html = rule.re_rule(url)
        # The last pagination link looks like '<n>.htm'; its stem is the
        # total page count.
        pages = html.xpath("//div[@class='NewPages']//ul/li//@href")[-1].split('.')[0]
        return (int(pages), url)

class pages(object):
    '''Walk a category's paginated listings and collect/download images.'''

    def __init__(self, page_n, url):
        self.page_n = page_n  # total number of listing pages
        self.url = url        # category base URL; pages are '<url><n>.htm'

    def get_paging(self):
        '''Visit every listing page, then every gallery link on it, and
        append each image src to the module-level ``img_url`` list.'''
        urls = [self.url + '{}.htm'.format(i) for i in range(1, self.page_n + 1)]
        for url_n in urls:
            html = rule.re_rule(url_n)
            jpg_urls = html.xpath("//div[@class='TypeList']//a/@href")
            for jpg_url in jpg_urls:
                html = rule.re_rule(jpg_url)
                page_f = html.xpath('//*[@id="ArticleId60"]/p/a/img/@src')
                img_url.extend(page_f)

    def extract(self):
        '''Download one randomly chosen collected image into ./image/.'''
        import os
        IMAGE_URL = random.choice(img_url)
        # FIX: take the text after the LAST dot — the real file extension.
        # The original split('.')[3] raised IndexError (or picked garbage)
        # whenever the URL contained a different number of dots.
        The_suffix = IMAGE_URL.rsplit('.', 1)[-1]
        # FIX: urlretrieve fails if the target directory does not exist.
        os.makedirs('./image', exist_ok=True)
        urlretrieve(IMAGE_URL, './image/img1.{}'.format(The_suffix))
        # return img_url
if __name__ == '__main__':
    print('''      ================欢迎来到图片选择器v1.0=====================
      =======================提示信息=============================
                        1：电脑壁纸  2：手机壁纸
                        3.动态壁纸   4:护眼壁纸
                        5:美女壁纸   6:小清新壁纸
                        7:唯美壁纸   8:风景壁纸
                        9:可爱壁纸   0:退出                              ''')
    # Menu number -> category title as shown on the site.
    form_list  = {1:'电脑壁纸',  2:'手机壁纸',3:'动态壁纸',   4:'护眼壁纸',5:'美女壁纸',
                  6:'小清新壁纸',7:'唯美壁纸',  8:'风景壁纸',9:'可爱壁纸',}
    while True:
        # FIX: the original crashed with ValueError on non-numeric input
        # and with KeyError on choices outside 0-9.
        try:
            num = int(input("请输入您的选择(数字)："))
        except ValueError:
            print('请输入数字！')
            continue
        if num == 0:
            break
        if num not in form_list:
            print('无效的选择，请重新输入！')
            continue
        p = img_Url(form_list[num])
        page_n, url = p.Total_label()
        print('%s页数为：%s, url:%s' % (form_list[num], page_n, url))
        ps = pages(page_n, url)
        ps.get_paging()
        ps.extract()
        print(img_url)
        break

    # for k,v in dict.items():
    #     print('name:%s ===》 url:%s' %(k,v))

