# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：getBeauty.py
#日期：2018-05-13
#备注：get_one_page()函数传参控制url并转化为可解析内容；save_one_image()函数获取图片路径并下载文件；main()函数进行传参循环下载图片    
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''


import os
import requests
import urllib.request
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from tqdm import tqdm

def get_one_page(beauty, page):
    """Fetch one gallery page and return its HTML as text.

    Parameters:
        beauty: gallery (model) id, sent as the ``id`` query parameter.
        page:   picture index within the gallery, sent as ``mm``
                (each gallery has one picture per page).

    Returns:
        The response body decoded as gb2312.

    Raises:
        requests.RequestException on connection failure or timeout.
    """
    paras = {
        'id': beauty,         # gallery id
        'mm': page            # picture/page index inside the gallery
    }

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
    }

    url = 'http://www.xingmeng365.com/articles.asp?' + urlencode(paras)

    # Fetch the page; the timeout keeps a dead server from hanging the whole
    # crawl (matches the 20 s timeout used for image downloads below).
    response = requests.get(url, headers=headers, timeout=20)
    # The site serves a legacy Chinese encoding; requests would guess wrong.
    response.encoding = 'gb2312'
    print("当前爬取的网址为：" + url)
    return response.text

def save_one_image(html, path, id, mm):
    """Parse one gallery page and download its picture into a per-gallery folder.

    Parameters:
        html: HTML text of one gallery page (as returned by get_one_page).
        path: root directory under which the per-gallery folder is created.
        id:   gallery id, used for the folder and file name prefix.
        mm:   picture index, used for the file name and progress messages.

    Side effects:
        Changes the process working directory (chdir), creates a folder named
        "<id>-<title>", and writes "<id>-<mm>.jpg" inside it.
    """
    soup = BeautifulSoup(html, 'lxml')
    for link in soup.find_all('img'):
        src = link.get('src')
        # Guard: <img> without a src attribute would make the `in` test raise
        # TypeError; skip anything that is not a gallery upload.
        if not src or "/upload/image" not in src:
            continue
        # Galleries with id <= 6 use a different relative-path prefix on the
        # site, so a different number of leading characters must be stripped.
        if id <= 6:
            image_url = "http://www.xingmeng365.com/" + src[6:]
        else:
            image_url = "http://www.xingmeng365.com/" + src[1:]  # for id >= 7, strip [1:] instead of [6:]
        fileName = soup.find('h1').get_text()  # gallery title from the page heading
        os.chdir(path)
        image_path = str(id) + '-' + str(fileName)
        if not os.path.exists(image_path):
            print("开始创建文件夹：" + str(fileName))
            os.makedirs(image_path)
        os.chdir(image_path)
        print("开始下载第" + str(mm) + "张图片：" + image_url)
        req = urllib.request.Request(url=image_url)
        try:
            image = urllib.request.urlopen(req, timeout=20)
            pic = image.read()
        except Exception:
            print("第" + str(mm) + "张图片访问超时，下载失败：" + image_url)
            continue
        # Open the output file only after the download succeeded: the original
        # opened it before the try-block, so a timeout leaked the handle and
        # left an empty .jpg behind. `with` guarantees the file is closed.
        # File name is "<id>-<mm>.jpg" so pictures are easy to locate later.
        with open(str(id) + '-' + str(mm) + '.jpg', "wb") as file:
            file.write(pic)
        print("第" + str(mm) + "张图片下载成功")

def main(start_id, end_id, page, path):
    """Crawl galleries start_id..end_id-1, saving pictures 1..page-1 of each.

    Parameters:
        start_id: first gallery id to fetch (inclusive).
        end_id:   last gallery id bound (exclusive).
        page:     picture-index bound per gallery (exclusive).
        path:     root directory passed through to save_one_image.
    """
    for gallery_id in tqdm(range(start_id, end_id)):
        for picture_no in range(1, page):
            page_html = get_one_page(gallery_id, picture_no)
            save_one_image(page_html, path, gallery_id, picture_no)

if __name__ == '__main__':
    # Raw string for the Windows path: '\L' and '\A' are not escape sequences
    # today, but unrecognized escapes are a SyntaxWarning in modern Python and
    # slated to become errors; r'...' makes the path unambiguous.
    main(124, 760, 60, r'F:\Lingwei\AllPhoto')
