from bs4 import BeautifulSoup
import requests
import os

# Browser-style User-Agent so the site serves normal pages instead of
# rejecting the default python-requests UA.
headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    }
# Site root; relative hrefs/srcs found in pages are joined onto it.
base_url = 'https://www.mntup.com'
# Local directory under which one numbered sub-folder per gallery is made.
base_path = 'C:/soft/pythonWorkspace/testfile/'
# Filename prefix read by download(); left empty so files land in the cwd
# (createFile() chdirs into the target folder before download() runs).
file_path = ''

# Download every image found on one gallery page.
def download(index, url):
    """Fetch the gallery page at *url* and save each image it contains.

    Args:
        index: page number used as the filename prefix ("<index>_<n>.jpg").
        url: absolute URL of the gallery page to scrape.

    Files are written relative to the module-level ``file_path`` prefix
    (the caller is expected to have chdir'd into the target folder).
    Errors are reported and swallowed so one bad page does not abort
    the whole crawl.
    """
    out = requests.get(url, headers=headers)
    out.encoding = 'gbk'  # the site's pages are GBK-encoded
    soup = BeautifulSoup(out.text, 'lxml')
    imgs = soup.find(class_='img').find_all('img')
    print('开始下载')
    try:
        for index1, img in enumerate(imgs):
            img_url = img.get('src')
            if not img_url:
                # Skip <img> tags that carry no src attribute.
                continue
            img_url = base_url + img_url
            # Send the same browser headers as the page request, and bound
            # the wait so one stalled server cannot hang the crawl forever.
            imge = requests.get(img_url, headers=headers, timeout=30)
            # 'wb' (not the original 'ab') so re-running the crawl does not
            # append duplicate bytes to an existing file; the context
            # manager guarantees the handle is closed even on write errors.
            with open(file_path + str(index) + '_' + str(index1) + '.jpg', 'wb') as f:
                f.write(imge.content)
        print('下载结束' + str(index))
    except Exception as e:
        # Best-effort crawl: report the failure and move on.
        print(e)
# Create the target folder (if needed) and make it the current directory.
def createFile(file_path):
    """Ensure *file_path* exists as a directory and chdir into it.

    download() writes image files with a relative prefix, so changing
    the working directory here is what routes its output into the
    per-gallery folder.
    """
    # exist_ok replaces the racy exists()-then-makedirs() check: no
    # crash if the directory appears between test and creation.
    os.makedirs(file_path, exist_ok=True)
    # Switch into the folder so subsequent relative writes land here.
    os.chdir(file_path)

# Parse the index page and crawl every gallery it links to.
def main():
    """Crawl the site index: create one sub-folder per gallery and
    download each gallery's pages via download()."""
    # The original assignment below created a *local* file_path that
    # shadowed the module global, so download() only worked because
    # createFile() chdirs; declare it global so the prefix download()
    # reads is actually the per-gallery folder.
    global file_path
    out = requests.get(base_url, headers=headers)
    soup = BeautifulSoup(out.text, "lxml")
    dans = soup.find_all('div', class_='dan')
    # enumerate supplies the gallery number used for the folder name.
    for index, dan in enumerate(dans):
        print('第' + str(index) + '个人物')
        file_path = base_path + str(index) + '/'
        createFile(file_path)
        dan_href = dan.find('a').get('href')
        detail_url = base_url + dan_href
        print(detail_url)
        detail_out = requests.get(detail_url, headers=headers)
        # Detail page: count sub-pages from the pagination bar links.
        detail_soup = BeautifulSoup(detail_out.text, 'lxml')
        detail_num = detail_soup.find(class_='page').find_all('a')
        # NOTE(review): the "- 1" presumably drops a trailing "next/last"
        # link in the pagination bar — confirm against the live markup.
        print(len(detail_num) - 1)
        for i in range(1, len(detail_num) - 1):
            print('第' + str(index) + '个人物子页面')
            if i == 1:
                # The first page is the gallery URL itself...
                page_url = detail_url
            else:
                # ...later pages insert "_<n>" before the ".html" suffix.
                page_url = detail_url[:-5] + '_' + str(i) + '.html'
            download(i, page_url)

# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()

