# -*- coding:utf-8 -*-
'''
Known issue:
Some images fail to download during the crawl and therefore cannot be
displayed afterwards.
'''
from bs4 import BeautifulSoup
import urllib2,urllib,re
import sys,os,time



x = 0  # global running image counter; downImg uses it to give every saved file a unique name
#下载进度条
# Download progress bar
def report(count, blockSize, totalSize):
	"""reporthook for urllib.urlretrieve: redraw a percent-complete line in place.

	count: number of blocks transferred so far
	blockSize: size of one block in bytes
	totalSize: total size in bytes; urlretrieve passes -1 when unknown,
	           so guard against non-positive values to avoid ZeroDivisionError.
	"""
	if totalSize <= 0:
		return
	# cap at 100: the last block usually overshoots totalSize
	percent = min(int(count * blockSize * 100 / totalSize), 100)
	# \r rewrites the same terminal line; no newlines, or the bar breaks
	sys.stdout.write("\r%d%%" % percent + ' complete')
	sys.stdout.flush()


#获取每页的代码
# Fetch one listing page and return the image-set links found on it
def get_page_url(url):
	"""Return the href of every <a class="imageLink"> on the listing page at url."""
	html = urllib2.urlopen(url).read()
	soup = BeautifulSoup(html, 'lxml')
	return [link.get('href') for link in soup.select('a.imageLink')]


#获取图片下载地址
# Fetch one image-set page and collect its title plus image URLs
def get_img_link(url):
	"""Return {'title': set title, 'imgs': [image hrefs]} for the image-set page at url.

	Sends a browser User-Agent header, since the site rejects the default one.
	"""
	user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.87 Safari/537.36'
	request = urllib2.Request(url, headers={'User-Agent': user_agent})
	html = urllib2.urlopen(request).read().decode('utf-8')
	soup = BeautifulSoup(html, 'lxml')
	return {
		'title': soup.select('h2.main-title')[0].text,
		'imgs': [link.get('href') for link in soup.select('div.main-body > p > a')],
	}



#构造分页列表 和下载
# Walk the listing pages and download every image set found, timing the run
def get_more_page(start=1, end=2):
	"""Download all image sets on listing pages [start, end).

	Bug fixed: the original ignored its start/end parameters (hard-coded
	range(1, 2)) and immediately shadowed `start` with time.time(), so only
	page 1 was ever crawled no matter what the caller asked for.
	"""
	t0 = time.time()
	folder_Name = 'imgs_3'
	# create the target folder once, not on every page iteration
	make_folderName(folder_Name)
	for i in range(start, end):
		url = 'http://www.jdlingyu.moe/page/{}/'.format(i)
		for page_url in get_page_url(url):
			# page_url is one image-set page on this listing page
			print('%s ......列表打开中......' % page_url)
			downImg(get_img_link(page_url), folder_Name)
			time.sleep(2)  # throttle requests to be polite to the server
	long_time(t0, time.time())


		
#下载图片咯
# Download all images of one set into ./folderName
def downImg(img_dict, folderName):
	"""Save every image URL in img_dict['imgs'] as ./folderName/<x>.jpg.

	Uses the module-level counter `x` so filenames stay unique across sets.
	Bug fixed: a single failed download (the known issue in the module
	docstring) used to raise out of the loop and abort the whole crawl;
	failures are now reported and skipped.
	"""
	global x
	title = img_dict['title']
	print('图集---||<< %s >>' % title.encode('utf-8'))
	for img_url in img_dict['imgs']:
		try:
			urllib.urlretrieve(img_url, './%s/%s.jpg' % (folderName, x))
		except IOError:
			# skip broken images instead of killing the run
			print('下载图片 %s ------------------->失败,跳过' % img_url)
			continue
		print('下载图片 %s ------------------->OK' % img_url)
		x += 1

#创建目录
# Create the download directory if needed
def make_folderName(folderName):
	"""Create ./folderName unless it already exists.

	Fix: the original tested os.path.exists('./%s' % folderName) but then
	called os.mkdir(folderName) — the same directory spelled two ways; use
	one path for both the check and the creation.
	"""
	path = './%s' % folderName
	if not os.path.exists(path):
		os.mkdir(path)

#计算时间差
# Report the elapsed wall-clock time of the run
def long_time(start, end):
	"""Print how long the run took, given two time.time() timestamps.

	Bug fixed: the elapsed value is in SECONDS, but the original labelled
	divmod(secs, 60) as 小时/分钟 (hours/minutes) — divmod by 60 yields
	minutes and seconds, and the small-value branch printed seconds as 分钟.
	"""
	elapsed = int(end) - int(start)
	if elapsed >= 60:
		minutes, seconds = divmod(elapsed, 60)
		print('总共消耗了%s分钟%s秒' % (minutes, seconds))
	else:
		print('总共消耗了%s秒' % elapsed)
# #构造分页列表
# def pageIndex():
# 	x=0
# 	page = 'http://www.jdlingyu.moe/page/%s/'1
# 	for i in xrange(1,164):
# 		print page % i,'打开中.........'
# 		pageurl = page % i 
# 		# print getPageIndex(pageurl)
# 		for page_url in getPageIndex(pageurl):
# 			last_page = getImg(page_url)
# 			print '下载图集>>>>>%s' % last_page[0]
# 			for imgnum,imglist in enumerate(set(last_page[1])):
# 				print '第 %s 张图片下载.....' %imgnum
# 				print imglist
# 				urllib.urlretrieve(imglist,'./img/%s.jpg' % x,reporthook=report)
# 				x+=1
# 			# for k,v in getImg(page_url).items():
# 			# 	print v[0]
# 			# 	print '下载图集>>>>>%s' % last_page[0]
# 			# 	for i,imglist in enumerate(img_url[1]):
# 			# 		print '第 %s 张图片下载' %i
# 			# 		downImg(imglist) 


# def testWrite(content):
# 	t = open('./img.txt','w+')
# 	t.write(content)
# 	t.close()
# '''
# def testread(path):
# 	t = open(path,'r')
# 	content = t.readlines()
# 	t.close()
# 	return conten
# '''
# # print testread('./ht.html')
# # print getIndex()
# # downImg(getIndex())
# # pageIndex()
# # for i in getPageIndex('http://www.jdlingyu.moe/page/2/'):
# # 	print i 
# # print getPageIndex('http://www.jdlingyu.moe/16472/')
# imgurl = getImg('http://www.jdlingyu.moe/16515/')
# # t = open('./img1.txt','w+')
# # for imglist in imgurl[1]:
# # 	t.write(imglist)
# # t.close()
# print len(imgurl)
# print imgurl
# # print 'end'
# # x = 1 
# # for imgnum,imglist in enumerate(imgurl[1]):
# # 	print '第 %s 张图片下载.....' %imgnum
# # 	print imglist
# # 	urllib.urlretrieve(imglist,'./test/%s.jpg' % x,reporthook=report)
# # 	x+=1
# 
# img_code = get_img_link()
if __name__ == '__main__':
	# Script entry point: crawl listing pages 1-9 (end is exclusive).
	# Guarded so importing this module does not kick off a crawl.
	get_more_page(1, 10)
