# _*_ coding: utf-8 _*_
# @Time : 2020/10/21 13:19
# @Author : moran office
# File : 3gbizhi2.py
# Software : PyCharm

import requests
from bs4 import BeautifulSoup
import os
import io
import sys
import time


# https://www.3gbizhi.com/meinv/index_1.html
# https://www.3gbizhi.com/meinv/index_23.html
# Root directory where downloaded albums are stored (one sub-folder per album title).
path = 'E:/py_mnxz/'
# HTTP request headers: a desktop Chrome User-Agent so the site serves normal pages.
refer = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
# Re-wrap stdout with the gb18030 codec so the Chinese log messages print
# without UnicodeEncodeError on a GBK Windows console.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')

# Mapping of album title -> album page URL, filled from each listing page
# by get_new_page_list() and cleared at the end of request_new_page_list().
new_page_list = {}

# 保存当前页面中所有的页面连接到 list中
def get_new_page_list(soup):
    """Collect every album link on a listing page into the global new_page_list.

    Walks the listing container (div.contlistw.mtw > ul.cl > li), and for each
    entry stores the anchor's title attribute as the key and its href as the
    value. Mutates the module-level dict; returns None.
    """
    gallery = soup.find('div', class_='contlistw mtw').find('ul', class_='cl')
    for item in gallery.find_all('li'):
        anchor = item.find('a')
        # title attribute names the album; href points at its detail page
        new_page_list[anchor["title"]] = anchor["href"]

# 请求所有的连接
# 请求所有的连接
def request_new_page_list():
    """Download every album collected in new_page_list, then clear the dict.

    For each (title, href) pair: creates (or reuses) the folder path + title,
    skips albums that already hold 10+ files, then walks the album's numbered
    pages (base_1.html, base_2.html, ...) until a request fails, saving the
    img#showpicnow image from each page into the album folder.

    Side effects: creates directories, changes the process CWD (os.chdir),
    writes image files, and empties new_page_list when done.
    """
    if not new_page_list:
        print('字典中没有数据，结束')
        return
    # 开始请求
    for title, href in new_page_list.items():
        album_dir = path + title  # one folder per album
        if os.path.exists(album_dir):
            print('目录已存在')
            already_existed = True
        else:
            os.makedirs(album_dir)
            already_existed = False
        os.chdir(album_dir)  # images are saved by bare filename into the CWD
        # A pre-existing folder with 10+ files is treated as fully downloaded.
        if already_existed and len(os.listdir(album_dir)) >= 10:
            print('已经保存完毕，跳过')
            continue
        # Strip the ".html" suffix; pages are base + "_<n>.html".
        base_href = href.rsplit('.', 1)[0]
        index = 1
        while True:
            page_url = base_href + '_' + str(index) + '.html'
            print(page_url)
            response = requests.get(page_url, headers=refer)
            if not response.ok:
                # A non-2xx/3xx status means we ran past the last page.
                print('请求失败，应该是url不存在')
                break
            soup = BeautifulSoup(response.content.decode('utf-8'), 'html.parser')
            img = soup.find('img', id='showpicnow')
            if img is None:
                # Page layout changed / no image tag; the original code would
                # have crashed with AttributeError here.
                break
            img_url = img["src"]
            if img_url == "":
                print("url为空")
                # BUG FIX: the original `continue` skipped the index increment,
                # re-fetching the same page forever. Advance before retrying.
                index += 1
                continue
            img_name = img_url.split('/')[-1]
            try:
                print("保存第{}张图片".format(index))
                img_resp = requests.get(img_url)
                # `with` guarantees the file handle is closed even on a
                # write error (original used open/close with no finally).
                with open(img_name, 'wb') as f:
                    f.write(img_resp.content)
            except (requests.RequestException, OSError):
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; best-effort per image.
                print("发生错误")
            index += 1
    new_page_list.clear()

# Crawl listing pages 1..8 of the "mnxz" category: collect each page's album
# links, then download them. Other category URL templates previously used:
# dmmn_{n}.html and bjnmn_{n}.html.
for page in range(1, 9):
    url = 'https://www.3gbizhi.com/meinv/mnxz_{}.html'.format(page)
    print(url)
    listing = requests.get(url, headers=refer)
    listing_soup = BeautifulSoup(listing.content.decode('utf-8'), 'html.parser')
    # Fill the global new_page_list from this listing page.
    get_new_page_list(listing_soup)
    print('连接保存完毕')
    print(new_page_list)
    # Download every collected album (clears new_page_list afterwards).
    request_new_page_list()














