# -*- coding: utf-8 -*-


"""
Downloads images from Google or Bing, using their AJAX API.

Note that:
1. Google currently allows only 32 images...arggg!
2. Bing requires an AppID key that you need to obtain from Bing's developer website
3. When passing the query, you should put it in quotes, so that it will not be interpreted as multiple arguments

Udi Weinsberg
"""

import urllib2
import urllib
import httplib
import simplejson
import socket

_HEADERS = { 'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/20100101 Firefox/17.0" }

def download( url ):
	"""Fetch *url* with a browser-like User-Agent header.

	Returns the response body as a string, or None on any network or
	protocol failure (HTTP error status, malformed status line,
	DNS/socket error).
	"""
	try:
		request = urllib2.Request( url, None, _HEADERS )
		response = urllib2.urlopen( request )
		try:
			return response.read()
		finally:
			# always release the connection, even if read() fails
			response.close()
	except ( urllib2.URLError, httplib.BadStatusLine, socket.error ):
		# urllib2.HTTPError is a subclass of URLError, so it is covered
		# here too; callers treat every failure uniformly as None.
		return None

class ImageRipper:
	_QUERY_URL = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q={query}&userip={user_ip}&imgsz={image_size}"
	_DEFAULT_IP = "192.168.1.1"
	_MEDIUM_IMAGE_SIZE = "medium"

	def __init__( self, **options ):
		self._user_ip = ImageRipper._DEFAULT_IP
		if 'image_size' in options:
			self._image_size = options[ 'image_size' ]
		else:
			self._image_size = ImageRipper._MEDIUM_IMAGE_SIZE

		self._proxy = type( 'Proxy', ( object, ), {} )
		proxy = options[ 'proxy' ]
		setattr( self._proxy, 'get', staticmethod( proxy[ 'get' ] ) )
		setattr( self._proxy, 'delete', staticmethod( proxy[ 'delete' ] ) )
		setattr( self._proxy, 'count', staticmethod( proxy[ 'count' ] ) )

	@staticmethod
	def _proxy( d ):
		proxy = urllib2.ProxyHandler( d )
		opener = urllib2.build_opener( proxy )
		urllib2.install_opener( opener )		
	
	def use_proxy( self ):
		self._addr = self._proxy.get()
		print "ip={0}".format( self._addr )
		ImageRipper._proxy( { 'http': "http://{0}/".format( self._addr ) } )
		#self._user_ip = ip[ : ip.find( ':' ) ] if ':' in ip else ip

	def is_proxy_list_empty( self ):
		return self._proxy.count() == 0

	def _remove_cur_proxy( self ):
		self._proxy.delete( self._addr )
		print "remove bad proxy"

	def restore_proxy( self ):
		# currently the function does not restore self._user_ip
		ImageRipper._proxy( {} )

	def _get_response( self, query ):
		query_url = ImageRipper._QUERY_URL.format( query = urllib.quote( query ), user_ip = self._user_ip, image_size = self._image_size )
		request = urllib2.Request( query_url, None, _HEADERS )
				
		result = None
		while not self.is_proxy_list_empty():
			try:
				url_file_results = urllib2.urlopen( request )
			except urllib2.URLError:
				self._remove_cur_proxy()
				if not self.is_proxy_list_empty():
					self.use_proxy() # try another proxy
			except httplib.BadStatusLine:
				# don't remove proxy
				# may be we should?
				pass
			else:
				json_results = simplejson.loads( url_file_results.read() )
				result = json_results[ 'responseData' ]
				break
		return result

	def get_images_urls( self, query, num_images ):
		
		i = 0 # num of image
		urls = []
		query2 = query
		iStart = 1
		
		response = self._get_response( query2 )
		if response:
			results = response[ 'results' ]
			response_cursor = response[ 'cursor' ]
			
			if 'pages' in response_cursor:
				# found, otherwise not found
				pages = response[ 'cursor' ][ 'pages' ]
				starts = [ x[ 'start' ] for x in pages ]
				len_starts = len( starts )
	
				for result in results:
					urls.append( result[ 'url' ] )
					i += 1		
				
				while i < num_images and iStart < len_starts:
					query2 = "{0}&start={1}".format( query, starts[ iStart ] )
					response = self._get_response( query2 )
					if response != None:
						results = response[ 'results' ]
						for result in results:
							urls.append( result[ 'url' ] )
							i += 1
							if i >= num_images:
								break
						iStart += 1
					else:
						break									
		return urls
			    
if __name__ == "__main__":
	# the code was changed and now does not work
	# rip_one_image returns buffer with image but not image name
	image_ripper = ImageRipper()
	#image_ripper.rip_one_image(  "Bill Gates", "C:/Users/sergzach/Desktop/all/tests/imageripper/result/gates.jpg" )
	print repr( len( image_ripper.get_images_urls( "Bill Gates", 25 ) ) )
    
