# _*_ coding: utf-8 _*_
# @Time : 2020/12/1 14:34
# @Author : moran office
# File : 彼岸图网.py
# Software : PyCharm

import requests
import sys
import os
import io
from bs4 import BeautifulSoup

url = 'http://pic.netbian.com'  # site root; image hrefs in listings are relative to this
path = 'E:/python爬虫/python_彼岸图网/'  # local root directory downloads are saved under

# Request headers: a browser User-Agent so the site serves pages to the scraper.
refer = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
}


# Ensure a directory exists.
def check_dir(path):
    """Create *path* (including any missing parents) if it does not exist.

    exist_ok=True makes the call idempotent and avoids the
    check-then-create race of the previous os.path.exists + makedirs pair.
    """
    os.makedirs(path, exist_ok=True)

def get_page(content):
    """Extract the total page count from a category listing page.

    content: raw HTML of the listing page.
    Returns the page count as a string — the text sitting between the
    "…" ellipsis and the "下一页" (next page) link in the pagination bar.
    """
    pagination = BeautifulSoup(content, "html.parser").find('div', class_='page')
    # print(pagination)
    bar_text = pagination.text
    after_ellipsis = bar_text.split("…")[1]
    return after_ellipsis.split("下一页")[0]

# Download one image and write it to disk.
def write_to_file(href, path):
    """Download the image at site-relative *href* and save it under *path*.

    href: image URL path relative to the site root (e.g. '/uploads/x.jpg');
          the saved file name is its last path component.
    path: existing directory to write the file into.
    Errors are reported and skipped so one bad image does not stop the crawl.
    """
    img_name = href.split('/')[-1]
    href = url + href
    print(href)
    try:
        pic = requests.get(href, headers=refer)
        # with-statement guarantees the handle is closed even if write() raises
        # (the old open/close pair leaked the handle on error)
        with open(path + '/' + img_name, 'wb') as f:
            f.write(pic.content)
    except Exception:
        # narrowed from a bare except, which also swallowed KeyboardInterrupt
        print("发生错误,将跳过")


# Fetch one listing page and save every image it shows.
def get_pic(url, path, page):
    """Download all images listed on a single pagination page.

    url:  full URL of the listing page to scrape.
    path: category directory; a per-page subdirectory is created inside it.
    page: page number (string) used as that subdirectory's name.
    """
    # print(url)
    try:
        response = requests.get(url, headers=refer)
        # the site serves gb2312-encoded HTML for these pages
        data = response.content.decode("gb2312")
        soup = BeautifulSoup(data, "html.parser")
        li_list = soup.find('ul', class_ = 'clearfix').find_all('li')
        child_path = path + '/' + page
        check_dir(child_path)
        for li in li_list:
            # thumbnail <img> src is a site-relative path; write_to_file
            # prefixes the site root before downloading
            href = li.find('a').find('img')['src']
            write_to_file(href, child_path)
    except Exception as e:
        # narrowed from a bare except; surface the actual error instead of hiding it
        print("=================== get_pic方法中发生错误 =================")
        print(e)
        return


# Crawl one category: discover its page count, then fetch every page.
def getUrl(url, title):
    """Crawl every pagination page of one category.

    url:   category index URL (ends with '/').
    title: category display name, also used as the save subdirectory.
    """
    try:
        check_dir(path + title)
        res = requests.get(url, headers = refer)
        # the 4K游戏 category is served as gbk; the others as gb2312
        if title == '4K游戏':
            data = res.content.decode("gbk")
        else:
            data = res.content.decode("gb2312")
        # print(data)
        page = get_page(data)
        # int(page) is the number of the LAST page, so the range must include it;
        # the original range(1, int(page)) silently skipped the final page.
        for i in range(1, int(page) + 1):
            # build each page's URL in a local, leaving *url* untouched
            # (replaces the old mutate-then-restore via base_url)
            if i == 1:
                page_url = url + 'index.html'
            else:
                page_url = url + f'index_{ i }.html'
            get_pic(page_url, path + title, str(i))
            print(f'第 {i} 页图片保存完成')
    except Exception as e:
        # narrowed from a bare except; report the cause
        print("请求子网页发生错误")
        print(e)

def main():
    """Entry point: list every category on the home page and crawl each one."""
    # re-wrap stdout so Chinese text prints cleanly on a GBK Windows console
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')
    response = requests.get(url, headers = refer)
    # response.encoding = response.apparent_encoding
    data = response.content.decode("gbk")
    # print(data)

    soup = BeautifulSoup(data, "html.parser")
    # the classify bar on the home page has one <a> per category
    a_list = soup.find('div', class_ = 'classify clearfix').find_all('a')
    # print(a_list)

    # build each category's URL and crawl it
    for a in a_list:
        child_url = url + a['href']
        print(a.text)
        print(child_url)
        getUrl(child_url, a.text)  # 请求子url返回的网页数据
        # getUrl('http://pic.netbian.com/4kyouxi/', '4K游戏')  # 4k游戏页面的字符集跟其他页面不一样
        print(f'{a.text}分类下的图片保存完成')


# guard so importing this module no longer triggers a full crawl
if __name__ == '__main__':
    main()
