#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#

import urllib
import sys
import re
import os
import string
import threading


# http://www.douban.com/photos/album/[0-9]+/   single album url format
# http://www.douban.com/people/[^/]+/photos list of albums url format

# Base directory that downloaded albums are saved under.  If this line is
# removed/left undefined, the fallback logic below picks $HOME/doufetch/albums/.
savedir = '/Users/macbook/Dropbox/寫真分享/'
def usage():
	'''Print the command-line usage string.'''
	print "Usage: albumfetch.py URL"

# Exactly one argument (the douban URL) is required.
if len(sys.argv) != 2:
	usage()
	sys.exit(1)

# The douban URL given on the command line (single album or album-list page).
dburl = sys.argv[1]

# Resolve the base save directory: the hard-coded savedir above wins, then
# $HOME/doufetch/albums/, then a directory relative to the working directory.
if locals().has_key('savedir'):
	savedirBase = savedir
elif os.environ.has_key('HOME') and os.path.isdir(os.environ['HOME']):
	#savedirBase = os.environ['HOME']+'/'+'/doufetch/albums'
	savedirBase = os.environ['HOME']+'/doufetch/albums/'
else:
	savedirBase = './doufetch/albums/'

# Classify the URL: 'album' = one photo album, 'albums' = a user's album list.
# NOTE(review): this variable shadows the builtin `type`; it is also read by
# the dispatch at the bottom of the file, so renaming must touch both places.
type = ''
if re.match(r"http://www.douban.com/photos/album/[0-9]+/?", dburl):
	type = 'album'
elif re.match(r"http://www.douban.com/people/[^/]+/photos/?", dburl):
	type = 'albums'

if type =='':
	print '[ERROR] unknown URL format'
	sys.exit(1)


def getAlbumInfo(url=''):
	'''Fetch the album page at url and return (title, author).

	The author is taken as the part of the page title before the first
	"-"; both values are '' when the title pattern is not found.'''
	page = urllib.urlopen(url).read()
	found = re.findall(r"""(.*?)title="(.*?)" id="gallery"(.*?)""", page)
	if not found:
		return ('', '')
	title = found[0][1]
	author = title.split("-")[0]
	return (title, author)
		

def getImageURLs(url=''):
	_o = 0
	_URLs = []
	while 1:
		try:
			_url = url+"?start="+str(_o)
			print "[INFO] opening %s " % _url
			data = urllib.urlopen(_url).read()
			matches = re.findall(r"""<img src="(.*?/photo/thumb/.*?)" />""", data)
			if len(matches)>0:
				_urlset =  [string.replace(x, "/photo/thumb/", "/photo/photo/") for x in matches]
				_URLs += _urlset
				_o += 18
			else:
				break
		except:
			print "[INFO] failed opening %s " % _url
			break
	return _URLs

def getImageURLs2(url=''):
	'''cooliris implementation'''
	_URLs = []
	coolirisURL = url+'/cooliris'
	data = urllib.urlopen(coolirisURL).read()
	matches = re.findall(r"""<media:content url="(.*?/photo/photo/.*?)"/>""", data)
	if len(matches)>0:
		_URLs = matches
	else:
		print "[INFO] empty album"
	return _URLs

def getAlbumURLs(url=''):
	_o = 0
	_URLs = []
	while 1:
		try:
			_url = url+"?start="+str(_o)
			print "[INFO] opening %s " % _url
			data = urllib.urlopen(_url).read()
			matches = re.findall(r"""<a class="album_photo" href="(.*?/photos/album/.*?)"><img class="album""", data)
			#print matches
			if len(matches)>0:
				#_urlset =  [string.replace(x, "/photo/thumb/", "/photo/photo/") for x in matches]
				_URLs += matches
				_o += 16
				#print _URLs
				#print "we got %i albums" % len(_URLs)
			else:
				break
		except:
			print "[INFO] failed opening %s " % _url
			break
	return _URLs


class FetchImage(threading.Thread):
	def __init__(self, urls=[], dburl=''):
		threading.Thread.__init__(self)
		self.urls = urls
		self.dburl = dburl
		(self.title, self.author) = getAlbumInfo(self.dburl)
	def run(self):
		savedir = savedirBase + self.author + '/' + self.title.strip()
		if not os.path.isdir(savedir):
			print "[INFO] creating %s" % savedir
			os.makedirs(savedir)
		for url in self.urls:
			basename = os.path.basename(url)
			fullsavename = savedir+'/'+basename
			print "[INFO] saving as %s" % fullsavename
			urllib.urlretrieve(url, fullsavename)
	

def fetchImagesbyAlbum(dburl=''):
	'''  basic implementation '''
	imageURLs = getImageURLs2(dburl)
	#print imageURLs
	(title, author) = getAlbumInfo(dburl)
	savedir = savedirBase + author + '/' + title
	if not os.path.isdir(savedir):
		print "[INFO] creating %s" % savedir
		os.makedirs(savedir)
	for url in imageURLs:
		basename = os.path.basename(url)
		fullsavename = savedir+'/'+basename
		print "[INFO] saving as %s" % fullsavename
		urllib.urlretrieve(url, fullsavename)

def fetchImagesbyAlbum2(dburl=''):
	''' threading implementation: start a FetchImage worker for the album
	at dburl and return the already-started thread object. '''
	worker = FetchImage(getImageURLs2(dburl), dburl)
	worker.start()
	return worker

if type == 'album':
	# single album: one worker thread, wait for it to finish
	worker = fetchImagesbyAlbum2(dburl)
	worker.join()

elif type == 'albums':
	# album-list page: start one worker per album, then wait for them all
	workers = [fetchImagesbyAlbum2(albumURL) for albumURL in getAlbumURLs(dburl)]
	for worker in workers:
		worker.join()
