'''
@Author: your name
@Date: 2020-03-22 19:59:03
@LastEditTime: 2020-03-23 11:27:47
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \giee\learn_python\爬虫\爬虫基础\练习项目\美女写真.py
'''
import requests
from fake_useragent import UserAgent
from lxml import etree
import os
import time

def re_html(html):
    """Parse the listing page and pull out, per gallery: detail-page link, title, model name.

    Args:
        html: raw HTML text of a listing page.

    Returns:
        Tuple of three parallel lists: (gallery URLs, gallery titles, model names).
    """
    tree = etree.HTML(html)
    # All three xpaths walk the same <li class="image-box"> entries, so the
    # returned lists line up index-for-index.
    gallery_links = tree.xpath('//ul/li[@class="image-box"]/a/@href')
    gallery_titles = tree.xpath('//ul/li[@class="image-box"]/a/img/@alt')
    model_names = tree.xpath('//ul/li[@class="image-box"]/div[@class="tags"]/a[1]/text()')
    return gallery_links, gallery_titles, model_names

def re_picture(mini_html):
    """Extract the image URLs from one gallery page.

    Args:
        mini_html: raw HTML text of a single gallery (detail) page.

    Returns:
        List of image src URLs (``//div//img[@alt]/@src`` matches).
    """
    return etree.HTML(mini_html).xpath('//div//img[@alt]/@src')

def get_html(log_url, timeout=10):
    """Fetch a URL with a random Chrome User-Agent and return the Response.

    Args:
        log_url: the URL to request.
        timeout: seconds before the request is aborted. New parameter with a
            default, so existing callers are unaffected; previously the call
            had no timeout and could hang forever on a stalled server.

    Returns:
        requests.Response with ``encoding`` forced to utf-8 so ``.text``
        decodes the site's pages correctly.
    """
    headers = {
        "User-Agent": UserAgent().chrome
    }
    response = requests.get(log_url, headers=headers, timeout=timeout)
    response.encoding = "utf-8"

    return response

def save_picture(picture_url, title_name, author_name,
                 base_dir="F:/360MoveData/Users/浪浪/Desktop/暂时"):
    """Download every image URL into a per-gallery folder.

    Args:
        picture_url: iterable of image URLs to download.
        title_name: gallery title, used as the folder name.
        author_name: model name, used as the file-name prefix.
        base_dir: root directory for downloads. New parameter whose default
            is the previously hard-coded path, so existing callers keep the
            old behavior.

    Files are named ``<author_name><index>.jpg`` with a 1-based index.
    """
    folder = os.path.join(base_dir, title_name)
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair and also
    # creates missing parent directories (mkdir would raise FileNotFoundError).
    os.makedirs(folder, exist_ok=True)

    for i, url in enumerate(picture_url, start=1):
        file_name = author_name + "{}{}".format(i, ".jpg")
        print("正在下载{}".format(file_name))
        with open(os.path.join(folder, file_name), "wb") as f:
            f.write(get_html(url).content)

def main():
    """Crawl listing pages 2..328, then download every gallery found on each.

    For each gallery the paginated URLs are built by inserting ``_2``, ``_3``,
    ... before the ``.html`` suffix until the server answers 404.
    """
    url = "https://www.yeitu.com/meinv/xinggan/{}.html"
    for num in range(2, 329):
        log_url = url.format(num)

        # Fetch and parse the listing page.
        html = get_html(log_url).text
        fen_url, title_name, author_name = re_html(html)

        for page_url, title, author in zip(fen_url, title_name, author_name):
            picture_url = []

            # Download the gallery's first page too: it has no "_N" suffix,
            # so the numbered loop below never covered it (old TODO).
            first = get_html(page_url)
            if first.status_code != 404:
                pics = re_picture(first.text)
                if pics:
                    picture_url.append(pics[0])

            i = 1
            while True:
                i += 1
                # Insert "_<i>" before the trailing ".html" (5 chars) to build
                # the paginated URL, e.g. foo.html -> foo_2.html.
                str_list = list(page_url)
                str_list.insert(-5, "_" + str(i))
                paged_url = "".join(str_list)

                # Single request per page — the original fetched each URL
                # twice (once for the status check, once for the body).
                response = get_html(paged_url)
                if response.status_code == 404:
                    print("条件通过")
                    break

                pics = re_picture(response.text)
                if pics:  # guard: page with no matching <img> used to raise IndexError
                    picture_url.append(pics[0])
                print("第{}张".format(i))
                time.sleep(1)

            # Persist everything collected for this gallery.
            save_picture(picture_url, title, author)


if __name__ == "__main__":
    main()