# coding=utf-8

import requests, time, urllib, threading
from bs4 import BeautifulSoup

# https://img3.doubanio.com/view/photo/lphoto/public/p2469248831.webp

# https://img3.doubanio.com/view/photo/lthumb/public/p2469248831.webp

# Album page URL template; %d is the zero-based index of the first photo
# shown on that page (0, 18, 36, ...).
url = 'https://www.douban.com/photos/album/1641111834/?start=%d'
# Number of photos Douban shows per album page.
step = 18


def save_image(url, conn, out_dir='C://WORKSPACE//logs//'):
	"""Download the image at *url* through the session *conn* and save it.

	The file is written to *out_dir* (default kept for backward
	compatibility) under a millisecond-timestamp name.  The thread id is
	appended to the name because the original timestamp-only name
	collided when several worker threads saved within the same
	millisecond, silently overwriting images.

	HTTP errors are printed but not re-raised (best-effort download).
	"""
	try:
		rsp = conn.get(url)
		# Raise HTTPError on 4xx/5xx so error pages are not written to
		# disk as ".jpg" files.  (Without this, the except branch below
		# was unreachable: requests does not raise HTTPError on its own.)
		rsp.raise_for_status()
		name = '%d_%s' % (int(round(time.time() * 1000)),
						  threading.current_thread().ident)
		# 'with' guarantees the handle is closed even if write() fails.
		with open(out_dir + name + '.jpg', 'wb') as f:
			f.write(rsp.content)
	except requests.HTTPError as e:
		print(e)


def save(url):
	"""Download *url* to the log directory under a timestamp-based name.

	Standalone variant of save_image() that does not reuse a session.
	"""
	# urlretrieve moved in Python 3; import it lazily so this module
	# works on either interpreter version.
	try:
		from urllib import urlretrieve  # Python 2
	except ImportError:
		from urllib.request import urlretrieve  # Python 3
	# Thread ident avoids name collisions when two threads save within
	# the same millisecond (the timestamp alone was not unique).
	name = '%d_%s' % (int(round(time.time() * 1000)),
					  threading.current_thread().ident)
	urlretrieve(url, 'C://WORKSPACE//logs//' + name + '.jpg')


def parse_page(url, conn):
	"""Fetch one album page and download every photo on it.

	The page lists thumbnails whose src contains 'lthumb'; swapping in
	'lphoto' yields the large version of the same image (see the sample
	URLs at the top of the file).
	"""
	rsp = conn.get(url)
	print(rsp.status_code)
	dom = BeautifulSoup(rsp.content, 'html.parser')
	imgs = dom.select('.photo_wrap img')
	for img in imgs:
		# Upgrade the thumbnail URL to the full-size photo URL.
		photo_url = img['src'].replace('lthumb', 'lphoto')
		save_image(photo_url, conn)
		# Throttle to roughly one download per second to stay polite.
		time.sleep(1)
		print('%s OK' % photo_url)
	print('OK %d' % len(imgs))


#########################################################

if __name__ == '__main__':
	print('###########################################')
	conn = requests.session()
	rsp = conn.get(url)
	print(rsp.status_code)
	dom = BeautifulSoup(rsp.content, 'html.parser')
	# The pager element carries the album's total page count.
	all_pages_num = int(dom.select('.thispage')[0]['data-total-page'])
	title = dom.select('#db-usr-profile h1')
	print(title[0])
	threads = []
	# One worker thread per album page.  Use the module-level `step`
	# constant instead of the hard-coded 18 the original repeated here.
	for page in range(0, step * all_pages_num, step):
		page_url = url % page
		print(page_url)
		t = threading.Thread(target=parse_page, args=(page_url, conn))
		t.daemon = True  # setDaemon() is deprecated
		t.start()
		threads.append(t)
	# BUG FIX: the original joined only the LAST thread started, so the
	# process could exit -- killing the daemon workers -- before the
	# other pages finished downloading.  Join every worker.
	for t in threads:
		t.join()
	print('all is ok')
	conn.close()
