#!/usr/bin/python
# -*- coding: utf-8 -*-

#############################################
#PROJECT : Download Tool
#FILE    : downImage.py
#USAGE   : Download pictures from some website
#CREATION: 20120610
#LICENSE : IIE
#$ID$    : Leon
#############################################

import urllib
import urllib2
from BeautifulSoup import BeautifulSoup
import sys
import signal
import os
import Queue
import threading
import time
'''
Global variables
'''
# 2011 07 17
imgDateS = '[2012 06 18]'  #include
imgDateE = '[2012 06 16]'  #include
queue = Queue.Queue()
out_queue = Queue.Queue()
exitFlag = False

'''
@param  : down image from URL, the filename begins with 'filename'
@return : None
'''
def downloadFromPage(filename, URL):
	print "down page -> ", URL

	try:
		page = urllib2.urlopen(URL)
	except:
		print 'urllib2.urlopen error:', URL
		return 

	soup = BeautifulSoup(page, fromEncoding="gb18030")

	i=1
	for x in soup.findAll('img'):
		#print 'downloading --> ', x['src']
		if os.path.isfile(filename + '_' + str(i) + '.jpg'):
			#print 'exist'
			i += 1
			continue

		try:
			urllib.urlretrieve(x['src'], filename + '_' + str(i) + '.jpg')
		except:
			#if not x['src']
			#print 'urllib.urlretrieve error:', x['src']
			pass

		i += 1

class ThreadPage(threading.Thread):
	def __init__(self, out_queue):
		threading.Thread.__init__(self)
		self.out_queue = out_queue

	def run(self):
		while True:
			(filename, URL) = self.out_queue.get()
			downloadFromPage(filename, URL)
			#print filename, 'complete'

			self.out_queue.task_done()
			#print 'out_queue get ', out_queue.qsize()

if __name__ == '__main__':
        
	for i in range(10):
		tp = ThreadPage(out_queue)
		tp.setDaemon(True)
		tp.start()

	path = '/home/leon/p/'
	k = 1
	exitFlag = False
	while not exitFlag:
		URL = 'http://www.rrr81.com/oumei/list_4_' + str(k) + '.html'
		print 'links: ', URL
		try:
			page = urllib2.urlopen(URL)
		except:
			print 'urllib2.urlopen error: ', URL
			break
		k += 1
		
		soup = BeautifulSoup(page, fromEncoding='gb18030')
		URLHeader = URL.find('/', 10);
		date = soup.findAll('span')
		links = soup.findAll(target="_blank")
			
		for i in range(len(links)):
			#print date[i].string, links[i].string
			if date[i].string < imgDateE:
				print 'date[i].string < imgDate'
				exitFlag = True
				break
			elif date[i].string > imgDateS:
				print 'date[i].string > imgDate'
				continue
			if not os.path.exists(path + date[i].string):
				os.mkdir(path + date[i].string)
			filename = path + date[i].string + '/' + links[i].string
			out_queue.put((filename, URL[:URLHeader] + links[i]['href']))

	print 'waiting'
	out_queue.join()
	print 'all complete'
