#!/usr/bin/env python
# -*- coding: utf-8 -*-

import requests # 发送http请求
from bs4 import BeautifulSoup # 解析html pip install lxml
import lxml # 解析器 中文不乱码
import os # 创建文件夹

# Scrape celebrity galleries from mm131.com
start_url = "http://www.mm131.com/mingxing/"

# Browser-like request headers. The site serves images only when the
# Referer header matches one of its own pages, so Referer is updated
# before each request further down in the script.
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2864.400',
'Referer':start_url
}
path = 'C:/Users/Administrator/Desktop/tmp' # local root directory for downloads
folder = '/mm131/mingxing/' # sub-path appended to *path* per gallery

# 下载图片
# Download one image
def download_img(img_url, path):
	"""Fetch *img_url* (using the module-level ``headers``, whose Referer
	must already point at a page of the same gallery) and save it into
	directory *path* under the URL's basename.

	Overwrites any existing file of the same name.
	"""
	img = requests.get(img_url, headers=headers)
	name = os.path.join(path, img_url.split('/')[-1])
	# "wb", not "ab": append mode would tack a second copy of the bytes
	# onto an existing file when the script is re-run, corrupting the image.
	with open(name, "wb") as f:
		f.write(img.content)


# 1. Fetch the gallery index page.
response = requests.get(url=start_url, headers=headers)


# 2. Parse the HTML with the lxml parser.
soup = BeautifulSoup(response.content, "lxml")

# Each gallery is linked as an <a target="_blank"> inside
# div.main > dl.list-left.public-box.  (recursive=True is find_all's
# default and has been dropped.)
a_list = soup.find("div", attrs={"class": "main"}).find("dl", attrs={"class": "list-left public-box"}).find_all("a", attrs={"target": "_blank"})

# Detail-page URL of every gallery on the index page.
url_list = [a["href"] for a in a_list]


# 3. Visit each gallery page, read its title / page count / first image
#    URL, then download every page's image.
for gallery_url in url_list:

	# The site validates Referer; use the gallery page for its own request.
	headers['Referer'] = gallery_url
	response = requests.get(url=gallery_url, headers=headers)
	soup = BeautifulSoup(response.content, "html.parser")

	title = soup.find("h5").text # gallery title, used as the folder name
	# The next-to-last pager link holds the total number of pages.
	page = soup.find("div",attrs={'class':'content-page'}).find_all('a')[-2].text
	img_url = soup.find("div",attrs={'class':'content-pic'}).find("img")['src']

	if not os.path.isdir(path+folder+title):
		os.makedirs(path+folder+title)

	# Download the first image.
	headers['Referer'] = gallery_url
	download_img(img_url, path+folder+title)

	for page_no in range(2, int(page)+1):
		# Image n lives next to the first one as <dir>/n.jpg.
		# (rsplit on '/' is safer than splitting on the substring '1.jpg',
		# which could match elsewhere in the URL.)
		img_url_n = img_url.rsplit('/', 1)[0] + '/' + str(page_no) + '.jpg'
		# BUG FIX: the Referer was previously built as
		# <base>_<entire-gallery-url>.html; it must be the paginated page
		# URL <base>_<n>.html or the server rejects the image request.
		headers['Referer'] = gallery_url.split('.html')[0] + '_' + str(page_no) + '.html'
		download_img(img_url_n, path+folder+title)

