# -*- coding: utf-8 -*-
# @Time : 2021/11/3 21:43 
# @Author : TanDaBao
# @File : beautypicture.py
'''
优美图库，详情分页图片，全部下载
'''
import requests, os, re
from lxml import etree
from multiprocessing.pool import Pool

class BeautyPicture():
    """Scraper for umei.cc galleries: crawls the first N listing pages and
    downloads every image of every gallery (including paginated sub-pages)
    into a local ``ImgLibs`` directory.
    """

    def __init__(self, page):
        """
        :param page: number of listing pages to crawl (page 1 is the bare
                     section URL, pages 2..N use ``index_<n>.htm``).
        """
        # Explicitly disable system proxies for all requests.
        self.proxies = {"http": None, "https": None}
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
        }
        self.url = 'https://www.umei.cc/meinvtupian/rentiyishu/'
        self.page = page

    def mkdir(self):
        """Create the image output directory if needed and return its name."""
        dirName = 'ImgLibs'
        # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
        # original check-then-create sequence.
        os.makedirs(dirName, exist_ok=True)
        return dirName

    def page_num(self):
        """Return the listing-page URLs for the first ``self.page`` pages.

        Page 1 is the bare section URL; page n (n >= 2) is ``index_n.htm``.
        """
        urls = []
        for page in range(self.page):
            if page == 0:
                urls.append(self.url)
            else:
                # loop index 1 -> index_2.htm, index 2 -> index_3.htm, ...
                urls.append('https://www.umei.cc/meinvtupian/rentiyishu/index_%d.htm' % (page + 1))
        return urls

    def detail_page(self):
        """Fetch every listing page and collect gallery titles and URLs.

        :return: list of ``{'name': title, 'url': detail_page_url}`` dicts.
        """
        result_list = []
        for url in self.page_num():
            response = requests.get(url=url, proxies=self.proxies,
                                    headers=self.headers, timeout=10)
            response.encoding = 'utf-8'
            tree = etree.HTML(response.text)
            li_list = tree.xpath('/html/body/div[2]/div[8]/ul/li')
            for li in li_list:
                # //text() because some titles nest a <strong> inside the
                # <span> (seen on page 3 of the site), so ./a/span/text()
                # would come back empty there.
                name = li.xpath('./a//span//text()')[0]
                new_url = 'https://www.umei.cc/' + li.xpath('./a/@href')[0]
                result_list.append({'name': name, 'url': new_url})
        return result_list

    def detail_Son_page(self):
        """Expand each gallery into its paginated sub-page URLs.

        :return: list of ``{'name': title + page_no, 'url': sub_page_url}``
                 dicts; single-page galleries keep their original name/url.
        """
        son_list = []
        for dic in self.detail_page():
            name = dic['name']
            url = dic['url']
            response = requests.get(url=url, proxies=self.proxies,
                                    headers=self.headers, timeout=5)
            response.encoding = 'utf-8'
            tree = etree.HTML(response.text)
            try:
                # Last pagination link, e.g. '.../12345_9.htm' -> 9 pages.
                # Raw-string pattern with an escaped dot: the original
                # '(.*?)(\d+)_(\d+)(.htm)' used invalid escapes in a plain
                # string and let '.' match any character before 'htm'.
                last_url = tree.xpath('/html/body/div[2]/div[12]/a[last()]/@href')[0]
                last_num = int(re.match(r'(.*?)(\d+)_(\d+)\.htm', last_url).group(3))

                # NOTE(review): range(1, last_num) skips page last_num itself.
                # Original behavior kept — confirm whether a[last()] is the
                # final page link or a "next" link before changing the bound.
                for page in range(1, last_num):
                    if page == 1:
                        # Page 1 is the detail URL itself (no _1 suffix).
                        page_url = url
                    else:
                        # '.../12345.htm' -> '.../12345_<page>.htm'
                        page_url = re.sub(r'(.*?)(\d+)(\.htm)', r'\1\2_%s\3', url) % page
                    son_list.append({'url': page_url, 'name': name + str(page)})
            except (IndexError, AttributeError):
                # No pagination bar (single-image gallery): the xpath returns
                # an empty list (IndexError) or the regex fails to match
                # (AttributeError on .group). Keep the page as-is.
                son_list.append({'url': url, 'name': name})

        return son_list

    def downLoad(self):
        """Download every image into the output directory.

        Network or parse failures are logged and skipped so one bad page
        cannot abort the whole run.
        """
        dirName = self.mkdir()
        son_list = self.detail_Son_page()
        for item in son_list:  # 'item', not 'dict': don't shadow the builtin
            name = item['name']
            url = item['url']
            try:
                response = requests.get(url=url, proxies=self.proxies,
                                        headers=self.headers, timeout=5)
                response.encoding = 'utf-8'
                tree = etree.HTML(response.text)
                # The id attribute literally contains the unrendered
                # '{dede:field.reid/}' DedeCMS template tag on this site.
                jpg_url = tree.xpath('//*[@id="ArticleId{dede:field.reid/}"]/p/a/img/@src')[0]
                print(jpg_url)
                try:
                    content = requests.get(url=jpg_url, proxies=self.proxies,
                                           headers=self.headers, timeout=5).content
                    jpgPath = dirName + '/' + name + '.jpg'
                    with open(jpgPath, 'wb') as f:
                        f.write(content)
                    print(name, '\t\t下载成功')
                except Exception as e:
                    # Best-effort: log the image fetch/write failure, move on.
                    print(e)
                    continue
            except Exception as e:
                print(e)
                print('异常url：', url)
                continue

if __name__ == '__main__':
    # Crawl the first 4 listing pages and download every image found.
    spider = BeautyPicture(4)
    spider.downLoad()

