# -*- coding:utf-8 -*-

# Yixiu gallery scraper  http://www.tu11.com/xingganmeinvxiezhen/

from bs4 import BeautifulSoup
from urllib import request
import requests
import os
import time
import random
import json
from User_Agent import UserAgent
from http import cookiejar

# 定义获取html内容方法，返回dom对象
# Fetch a URL and parse it into a DOM; returns None when the page can't be loaded.
def getHtmlContent(http_url):
	"""Fetch *http_url* and return its parsed BeautifulSoup DOM.

	Returns None when the request fails (network error / timeout) or the
	server answers with a non-200 status, so callers can skip the page.
	"""
	s = requests.session()

	# Rotate the User-Agent per request to look less like a bot.
	headers = {
	    "User-Agent": random.choice(UserAgent.USER_AGENTS)
	}

	try:
		# Timeout keeps the crawler from hanging forever on a dead host.
		req = s.get(url=http_url, headers=headers, timeout=10)
	except requests.RequestException:
		# Session.get raises on connection errors; it never returns None,
		# so the failure has to be caught here to report it as "missing".
		print('不存在该地址：',http_url)
		return None
	if req.status_code != 200:
		print('不存在该地址：',http_url)
		return None
	# The site serves GB2312-encoded pages; force the decoding explicitly.
	req.encoding = 'gb2312'
	htmlContent = req.text
	return BeautifulSoup(htmlContent, 'lxml')

# 获取页面的总翻页数量
# Read the total page count out of the gallery's pager widget.
def getPageNum(bf):
	"""Return the total number of pages for a gallery.

	bf is the BeautifulSoup DOM of the gallery's first page. Falls back to
	10 when no pager div is present (assumed upper bound in that case).
	"""
	pageContent = bf.find('div', class_='row dede_pages')
	if pageContent is None:
		# No pager on the page; assume a default maximum of 10 pages.
		return 10
	links = pageContent.contents[0].find_all('a')
	# The second-to-last <a> is assumed to carry the last page number
	# (the final link is presumably a "next/last" button) — TODO confirm.
	page_a = links[-2]
	# page_a is already a Tag; read its text directly instead of
	# re-serializing and re-parsing it through BeautifulSoup.
	return int(page_a.get_text())

# 下载指定页面的美女图片
# Download every image on a single gallery page.
def uploadPageImage(bf,fileName,num):
	"""Download all <img> sources found on one gallery page.

	bf       -- BeautifulSoup DOM of the page
	fileName -- gallery title, used as the base of each saved file name
	num      -- page number, embedded in the file name for uniqueness
	Returns a status message string (success / no-images-found).
	"""
	image_urls = bf.find('div', class_='nry')
	if image_urls is None:
		# Some article pages use a different wrapper class.
		image_urls = bf.find('div', class_='arc-body')

	if image_urls is None:
		print('该页面不存在图片！')
		return '下载失败，该页面不存在图片！'

	# Create the output directory once, up front. makedirs(exist_ok=True)
	# is atomic and independent of the CWD listing, unlike the old
	# per-iteration `'niceImg' not in os.listdir()` check.
	os.makedirs('niceImg', exist_ok=True)

	for nameNum, each_image_url in enumerate(image_urls.find_all('img')):
		img_src = each_image_url.get('src')

		imgName = fileName + '_%d_%d.jpg' % (num, nameNum)
		print(imgName,"下载地址：",img_src)
		try:
			print(imgName)
			request.urlretrieve(url=img_src,filename='niceImg/'+ imgName)
		except Exception as e:
			# Best-effort: log the failure and continue with the rest.
			print(e)
	return '下载成功！'
	

# 定义一个下载图片的函数，入参为图片页面的的地址集合
# Download all images for every gallery listed in list_url.
def uploadImage(list_url):
	"""Walk every gallery and download all of its pages' images.

	list_url -- iterable of 'title=url' strings: the gallery title, an '=',
	            then the absolute URL of the gallery's first page.
	Galleries whose pages fail to load are skipped silently.
	"""
	print('---------开始解析页面地址--------')
	for each_img in list_url:
		# Split on the FIRST '=' only, so URLs that themselves contain
		# '=' (e.g. query strings) are not truncated.
		fileName, img_url = each_img.split('=', 1)
		print(fileName,img_url)
		bf = getHtmlContent(img_url)
		if bf is None:
			continue

		# Page 1 was just fetched; reuse its DOM instead of re-requesting.
		pageNum = getPageNum(bf)
		uploadPageImage(bf, fileName, 1)
		for urlNum in range(2, pageNum + 1):
			# Page N of a gallery lives at <base>_N.html.
			target_url = img_url.replace('.html', ('_%d.html' % urlNum))
			page_bf = getHtmlContent(target_url)
			if page_bf is None:
				continue
			uploadPageImage(page_bf, fileName, urlNum)

# 当运行main方法时候，运行下列代码
# Entry point: crawl the listing page and download every gallery it links to.
if __name__ == '__main__':
	url = 'http://www.tu11.com/qingchunmeinvxiezhen/list_4_1.html'
	bf = getHtmlContent(url)
	# getHtmlContent returns None on failure; the old code crashed with
	# AttributeError on bf.find_all in that case.
	if bf is not None:
		# Each .shupic card links to one gallery; build 'title=url' entries
		# in the format uploadImage expects.
		list_url = [
			each.p.a.get('title') + '=http://www.tu11.com' + each.a.get('href')
			for each in bf.find_all(class_='shupic')
		]
		uploadImage(list_url)

