'''
Scrape an image-gallery website (jxmm.net) using requests + BeautifulSoup (bs4).
'''
import os.path
import re
import time

import requests
from bs4 import BeautifulSoup
import pprint as pp


# Create a directory for downloaded images.
def mkdir(dir_name):
    """Create *dir_name* if it does not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``os.path.exists`` + ``os.mkdir`` pair: that avoids the check-then-create
    race condition and also creates any missing parent directories.
    """
    os.makedirs(dir_name, exist_ok=True)


# Extract the image URL from a parsed content page.
def get_img_src_by_content(content):
    """Return the ``src`` of the first ``<img>`` inside ``<div id="content">``.

    ``content`` is a parsed BeautifulSoup document (or a falsy value).
    Returns '' when the document, the div, or the img tag is missing.
    """
    if not content:
        return ''

    container = content.find('div', attrs={'id': 'content'})
    if not container:
        return ''

    img_tag = container.find('img')
    if not img_tag:
        return ''

    return img_tag.get('src')


# Parse a listing page into per-gallery metadata.
def get_url_list_by_content(content):
    """Parse a listing page and return ``{page_id: info_dict}``.

    ``content`` is a parsed BeautifulSoup document (or falsy). Each info dict
    carries ``page_id``, ``title``, ``content_url``, ``pub_time`` and
    ``pic_total``. Malformed ``<li>`` entries (missing spans or missing link)
    are skipped instead of raising, which the original code did
    (KeyError/IndexError).
    """
    url_list = {}

    if not content:
        return url_list

    pic_div = content.find('div', attrs={'class': 'pic'})
    if not pic_div:
        return url_list

    for item in pic_div.find_all('li'):
        spans = item.find_all('span')
        # Need link span + publish-time span + picture-count span.
        if len(spans) < 3:
            continue

        link = spans[0].find('a')
        if not link:
            # Without the anchor there is no page_id to key on.
            continue

        href = link.get('href')
        info = {
            # BUGFIX: '.' must be escaped, otherwise the pattern also strips
            # e.g. 'xhtml' (any character followed by 'html').
            'page_id': re.sub(r'/pic/|\.html', '', href),
            # assumes the link text is '<id> - <title> ...' — TODO confirm
            # against live markup; kept identical to the original extraction.
            'title': link.text.split(' ')[2],
            'content_url': index_url + href,
            # Strip the '发布：' ("published:") prefix from the date span.
            'pub_time': spans[1].text.replace('发布：', ''),
            # The count span reads like '共 12P' -> 12.
            'pic_total': int(spans[2].text.replace(' ', '').replace('共', '').replace('P', '')),
        }
        url_list[info['page_id']] = info

    return url_list


# Browser-like User-Agent so the site does not reject the requests.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/114.0'}

# Site root URL; content URLs are built relative to this.
index_url = 'https://www.jxmm.net'

# Ensure the download target directory exists.
mkdir('./images')
# Seconds to pause between image downloads (basic rate limiting).
sleep = 1.5

# NOTE(review): the triple-quoted block below is scraping code for the FIRST
# listing page that has been disabled by turning it into a bare string literal.
# It mirrors the paginated loop further down. Kept byte-identical for reference;
# consider deleting it or guarding it with a real flag instead.
'''
############################### 采集首页 开始 ###############################
response = requests.get(index_url, headers=headers)
response.encoding = 'utf-8'
html = BeautifulSoup(response.text, 'html.parser')

# 获取图片地址
info_list = get_url_list_by_content(html)

for page_id in info_list:
    # 创建文件夹
    pic_dir = './images/' + str(page_id) + '-' + info_list[page_id]['title']
    mkdir(pic_dir)
    content_url = info_list[page_id]['content_url']
    pic_total = info_list[page_id]['pic_total'] - 1

    response = requests.get(content_url, headers=headers, timeout=(10, 20))
    response.encoding = 'utf-8'
    html = BeautifulSoup(response.text, 'html.parser')

    # 获取美女图片路径
    file_name1 = pic_dir + '/1.jpg'
    if not os.path.exists(file_name1):
        pic_url = get_img_src_by_content(html)
        pic_content = requests.get(pic_url, headers=headers).content

        # 图片保存到本地
        with open(file_name1, mode='wb') as f:
            f.write(pic_content)
            f.flush()

        print('生成图片：{}    图片地址：{}'.format(file_name1, pic_url))
        time.sleep(sleep)
    else:
        print('图片 {} 已存在'.format(file_name1))
    print()

    i = 2
    while i <= pic_total:
        new_content_url = 'https://www.jxmm.net/pic/' + page_id + '_' + str(i) + '.html'
        new_response = requests.get(new_content_url, headers=headers, timeout=(10, 20))
        new_response.encoding = 'utf-8'
        new_html = BeautifulSoup(new_response.text, 'html.parser')

        # 获取美女图片路径
        new_file_name = pic_dir + '/' + str(i) + '.jpg'
        if not os.path.exists(new_file_name):
            new_pic_url = get_img_src_by_content(new_html)
            new_pic_content = requests.get(new_pic_url, headers=headers).content

            # 图片保存到本地
            with open(new_file_name, mode='wb') as f:
                f.write(new_pic_content)
                f.flush()

            print('生成图片：{}    图片地址：{}'.format(new_file_name, new_pic_url))
            time.sleep(sleep)
        else:
            print('图片 {} 已存在'.format(new_file_name))
        print()

        i += 1

############################### 采集首页 结束 ###############################
'''


# ---------------- Scrape listing pages, starting from page 2 ----------------

def _fetch_html(url):
    """GET *url* and return it parsed as a BeautifulSoup document."""
    response = requests.get(url, headers=headers, timeout=(40, 60))
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')


def _save_image(pic_url, file_name):
    """Download *pic_url* to *file_name*, print progress, and throttle.

    Skips (with a message) when pic_url is empty — the original code would
    call requests.get('') and crash with MissingSchema.
    """
    if not pic_url:
        print('未找到图片地址：{}'.format(file_name))
        return
    pic_content = requests.get(pic_url, headers=headers, timeout=(40, 60)).content
    # Save the image to disk; the context manager flushes and closes.
    with open(file_name, mode='wb') as f:
        f.write(pic_content)
    print('生成图片：{}    图片地址：{}'.format(file_name, pic_url))
    time.sleep(sleep)


# Last listing page to scrape (inclusive).
max_page = 100
for page in range(2, max_page + 1):
    page_url = index_url + '/pic/index_' + str(page) + '.html'

    # Gallery metadata for every entry on this listing page.
    info_list = get_url_list_by_content(_fetch_html(page_url))

    for page_id, info in info_list.items():
        # One folder per gallery: ./images/<page_id>-<title>
        pic_dir = './images/' + str(page_id) + '-' + info['title']
        mkdir(pic_dir)

        # The original code downloads images 1 .. pic_total-1; keep that.
        last_index = info['pic_total'] - 1

        for i in range(1, last_index + 1):
            file_name = pic_dir + '/' + str(i) + '.jpg'
            if os.path.exists(file_name):
                # Unlike the original, skip the content-page fetch entirely
                # when the image is already on disk.
                print('图片 {} 已存在'.format(file_name))
                print()
                continue

            # Image 1 lives at the gallery's content URL itself; images 2..n
            # append '_<i>' to the page id. Use index_url instead of the
            # hard-coded host the original duplicated here.
            if i == 1:
                content_url = info['content_url']
            else:
                content_url = index_url + '/pic/' + page_id + '_' + str(i) + '.html'

            _save_image(get_img_src_by_content(_fetch_html(content_url)), file_name)
            print()
# ---------------- End of paginated scraping ----------------