#!/usr/bin/env python
# -*- coding:utf-8 _*-
""" 
@author:pengfeifu
@file: av.py
@version:
@time: 2021/11/07 
@email:1324734112@qq.com
@desc： 爬虫av图片
@function：常用函数
"""

import importlib
import os
import random
import re
import sys
import time

import requests
from bs4 import BeautifulSoup

importlib.reload(sys)

server_photo_url = "https://www.1z22.com"

server_dict = [
    {
        "title": "cartoon_photo",
        "server_path": "https://www.1z22.com/avshipin6/13/",
        "type": 0  # 需要参与过滤
    }
    # {
    #     "title": "secret_photo",
    #     "server_path": "https://www.1z22.com/avshipin6/7/",
    #     "type": 1
    # }
    # }, {
    #     "title": "woman_photo",
    #     "server_path": "https://www.1z22.com/avshipin6/10/",
    #     "type": 1
    # }
]
save_file_path = "/Users/mac/Pictures/net/"

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
}


# 创建文件夹
def mkdir_directory(file_path):
    if os.path.exists(file_path) is False:
        # 创建文件夹
        os.makedirs(file_path)
    # 切换到当前路径
    os.chdir(file_path)


def file_write(file_name, file_content):
    try:
        with open(file_name, "ab") as f:
            f.write(file_content)
    except IOError:
        print("文件写入失败")
    finally:
        print("%s写入成功" % file_name)
        f.close()


# 文件写入函数，photo_src图片页面的路径，不是文件路径，type:标识是否需要过滤
def inner_html(page_url, page_file_url, type):
    inner_res = requests.get(page_url, headers=headers)
    inner_soup = BeautifulSoup(inner_res.text, "html.parser")
    li_list = inner_soup.find("div", class_="box list channel").find("ul").find_all("li")
    for li in li_list:
        a = li.find("a", target="_blank")
        photo_src = server_photo_url + a.attrs["href"]
        file_title = a.text
        if type == 0:
            if not re.findall("3D全彩", file_title):
                continue
        mkdir_directory(page_file_url + "/" + file_title)
        # 图片下载
        file_download(photo_src)


# 模拟点开二级页面
def file_download(photo_src):
    photo_page_res = requests.get(photo_src, headers=headers)
    photo_page_soup = BeautifulSoup(photo_page_res.text, "html.parser")
    img_list = photo_page_soup.find("div", class_='content').find_all("img")
    counter = 0  # 便于排序
    for img in img_list:
        counter += 1
        img_src = img.get("src")
        img_name_arr = img_src.split("/")
        img_name = str(counter) + "-" + img_name_arr[len(img_name_arr) - 1]
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36",
            "Referer": img_src
        }
        img_res = requests.get(img_src, headers=header)
        print("开始保存图片", "*" * 120)
        file_write(img_name, img_res.content)


if __name__ == "__main__":
    for server in server_dict:
        server_path = server["server_path"]
        res = requests.get(server_path, headers=headers)
        soup = BeautifulSoup(res.text, "html.parser")
        page_content = soup.find("div", class_="pagination")
        # r匹配中间内容
        page_total = re.findall(r"1\/(\d+)页", page_content.text)[0]
        for page_num in range(12, int(page_total) + 1):
            time.sleep(random.randint(1, 2))
            # 保存页码的文件路径
            page_file_url = save_file_path + "/" + server["title"]
            mkdir_directory(page_file_url)
            page_url = server_path + str(page_num) + ".html"
            print("正在爬取第%d页图片信息：" % page_num)
            inner_html(page_url, page_file_url, server["type"])
