import os
import subprocess
import time
import urllib2

from bs4 import BeautifulSoup

def request_url(url):
	hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
		'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
		'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
		'Accept-Encoding': 'none',
		'Accept-Language': 'en-US,en;q=0.8',
		'Connection': 'keep-alive'}
	req = urllib2.Request(url, headers=hdr)

	try:
		page = urllib2.urlopen(req)
	except urllib2.HTTPError, e:
		print e.fp.read()

	html = page.read()
	soup = BeautifulSoup(html, "lxml")
	return soup

def url_handle(url, dlpath):
	"""Download every image referenced on the page at *url* into *dlpath*.

	Skips the page entirely when *dlpath* already exists (treated as
	"already downloaded") or when the page contains no images.

	Args:
		url: page to scrape for <img> tags.
		dlpath: directory to create and download images into.
	"""
	soup = request_url(url)
	img_urls = []
	for tag in soup.find_all('img'):
		# Read the src attribute directly. The original sliced the
		# tag's string form (str(s)[10:-3]), which silently corrupts
		# the URL as soon as the tag carries any other attribute or
		# serializes differently.
		src = tag.get('src')
		if src:
			img_urls.append(src)

	if not img_urls:
		return
	if os.path.exists(dlpath):
		# Directory already present -- assume this page was fetched before.
		return
	os.mkdir(dlpath)

	for src in img_urls:
		# Argument list with no shell avoids command injection via a
		# crafted image URL (os.system interpolated it into a shell
		# string).
		subprocess.call(["wget", "-c", "-P", dlpath, src])
if __name__ == '__main__':
	# Walk 50 gallery pages, newest first, pausing between requests
	# to stay polite to the server.
	start_page = 71196
	for page in range(start_page, start_page - 50, -1):
		page_url = "https://www.susu57.com/htm/pic2/" + str(page) + ".htm"
		url_handle(page_url, str(page))
		time.sleep(10)