#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Super Search Engine is powered by python webkit headless browser
which is used for scraping AJAX-powered webpages. 


url:  the page whose source you want to get
time: wait for seconds

"""


import sys, os, string, subprocess, re, glob,shutil,time
import argparse

import bs4
from bs4 import BeautifulSoup


# Extract the first matching image name when there is no "best guess".
def find_first_matching_image(soup=None):
    """Return the anchor text of the first entry in Google's matching
    results list (div#ires), or "unknown" when it cannot be found.

    soup: a BeautifulSoup document of the search-results page.
    """
    first_matching_name = "unknown"
    try:
        # div#ires wraps the ordered list of search results.
        div_soup = soup.find("div", id="ires")
        if div_soup:
            ol_soup = div_soup.ol
            children = ol_soup.contents
            # The third child holds the first real result entry and its
            # second <a> carries the human-readable name.
            # NOTE(review): these indices depend on Google's markup -- confirm.
            entry = children[2]
            anchors = entry.find_all("a")
            first_matching_name = anchors[1].text
    except Exception as err:
        # Best-effort scrape: the layout changed or soup is incomplete.
        print(Exception, err, " can not find matching images list")

    return first_matching_name

# Extract Google's "Best guess" name for the image, if the page offers one.
def find_best_guess_image(soup=None):
    """Return the "Best guess" label text from div#topstuff, or
    "unknown" when the page offers no best guess.

    soup: a BeautifulSoup document of the search-results page.
    """
    best_guess_name = "unknown"

    topstuff_soup = soup.find("div", id="topstuff")
    if topstuff_soup:
        div_soups = topstuff_soup.contents
        # The third child carries the "Best guess ..." sentence.
        # NOTE(review): index depends on Google's markup -- confirm.
        if len(div_soups) >= 3:
            best_guess_soup = div_soups[2]
            text_str = best_guess_soup.text
            print("best_guess_soup.text: ", text_str)

            if text_str.find(u'Best guess') != -1:
                anchor = best_guess_soup.a
                # Guard against a missing <a>, which would otherwise
                # raise AttributeError on .text.
                if anchor is not None:
                    best_guess_name = anchor.text

    return best_guess_name



#extract mathing image name from html string or html file
def extract_match_name_for_html(html_str=None,html_file=None):
    image_name = "unknown"
    #print html_str
    #print html_file

    #make a soup from html string or html file
    if html_str:
        soup = BeautifulSoup(html_str)
    elif html_file:
        soup = BeautifulSoup(open(html_file))
    else:
        raise Exception('parameter error')
    
    topstuff_soup = soup.find("div", id = "topstuff")
    if topstuff_soup:
	#find best guss image if there is one
	image_name = find_best_guess_image(soup)
	if image_name == "unknown":
	    #if there is not best guess, try to find the firt matching
	    image_name = find_first_matching_image(soup)
        
    #return the best guess name, or the first matching one
    return image_name

# Wrapper that invokes the super-search engine as an external executable
# to get the html response; xvfb-run supplies the virtual display the
# headless browser needs when no X server is available.
def get_search_resp_html(url=None, out_html=None, wait=None):
    """Run ./super_search.py for *url*, writing the page to *out_html*
    after waiting *wait* seconds.  Return the subprocess exit code
    (non-zero means failure).
    """
    # Build an argv list and avoid shell=True so that url/out_html
    # values cannot be interpreted by the shell (command injection).
    # When running inside an X session, drop the leading 'xvfb-run'.
    cmd = ['xvfb-run', 'python', './super_search.py',
           '-url', url, '-out', out_html, '-time', str(wait)]
    try:
        retcode = subprocess.call(cmd)
    except OSError:
        # xvfb-run or python not found: report failure as an exit code
        # so callers keep seeing the original retcode contract.
        retcode = 127

    if retcode != 0:
        print("search failed, please check the environment")
    return retcode

# Main function to test the module from the command line.

def main(argv=None):
    """Parse command-line options, run the image search, and print the
    best-guess name for the image.

    argv: full argument vector (argv[0] is the program name); defaults
    to sys.argv.  Returns None, so sys.exit(main()) exits with 0.
    """
    if argv is None:
        argv = sys.argv
    print(argv)

    # NOTE: the old reload(sys)/sys.setdefaultencoding('utf-8') hack was
    # removed -- it is Python 2-only and masks real encoding bugs.

    # Parse the command line arguments and options.
    parser = argparse.ArgumentParser()
    parser.add_argument("-url", "--image_url",
                        help="the url of image will be searched")
    parser.add_argument("-out", "--out_html",
                        help="the response html")
    parser.add_argument("-time", "--delay_time",
                        type=float,
                        default=2,
                        help="float type the delay time to get the full "
                             "response from outside ")
    # Honour the argv parameter instead of always reading sys.argv, so
    # main() can be driven programmatically (e.g. from tests).
    args = parser.parse_args(argv[1:])

    # Default values for test purposes.
    image_url = args.image_url if args.image_url else "empty"
    resp_html = args.out_html if args.out_html else "googleImageSearchResp.html"
    delay_time = args.delay_time

    print("Image address is : " + image_url)
    print("Delay time is : " + str(delay_time))
    print("output html is: " + resp_html)

    # Call the search command and generate the response html.
    ret = get_search_resp_html(url=image_url, out_html=resp_html,
                               wait=delay_time)

    guess_name = "unknown"
    if ret == 0:
        print("search successful, and parsing the name...")
        guess_name = extract_match_name_for_html(html_str=None,
                                                 html_file=resp_html)
    else:
        print("search failed!")

    print("Best guess for this image: " + guess_name)

# Run the self-test driver when executed as a script; main() returns
# None, so the process exits with status 0 on success.
if __name__ == "__main__":
    sys.exit(main())  
