#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Super Search Engine is powered by python webkit headless browser
which is used for scraping AJAX-powered webpages. 


url:  the page whose source you want to get
time: wait for seconds

"""

###__version__ = '20130115'

# Standard library
import argparse
import glob
import os
import re
import shutil
import string
import sys
import time
import urllib

# Third-party
import bs4
from bs4 import BeautifulSoup

from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtWebKit import QWebPage

SEC = 1000    # 1 sec. is 1000 msec.
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0'


class SuperEngine(QWebPage):
    """Headless QWebPage that loads a URL and captures the rendered HTML.

    The captured page source is stored in the class variable
    ``SuperEngine.html`` so callers (see get_page) can read it after the
    Qt event loop has quit.
    """
    # 'html' is a class variable: reset in __init__, written by save().

    def __init__(self, url, wait, app, parent=None):
        """Start loading *url* and arrange for *app* to quit when done.

        url    -- address of the page to load.
        wait   -- delay in seconds before quitting; if falsy, quit as soon
                  as loadFinished fires instead (no time for AJAX content).
        app    -- the QApplication whose event loop will be quit.
        parent -- optional Qt parent object.
        """
        super(SuperEngine, self).__init__(parent)
        SuperEngine.html = ''

        if wait:
            # Fixed delay: gives AJAX-rendered content time to appear
            # before the event loop is torn down.
            QTimer.singleShot(wait * SEC, app.quit)
        else:
            # No delay requested: quit on the initial load completion.
            self.loadFinished.connect(app.quit)

        # Quit hooks are wired up above BEFORE the load is kicked off.
        self.mainFrame().load(QUrl(url))

    def save(self):
        # Slot connected to aboutToQuit: snapshot the rendered page source.
        SuperEngine.html = self.mainFrame().toHtml()

    def userAgentForUrl(self, url):
        # Present a desktop Firefox user agent so sites serve full pages.
        return USER_AGENT


def get_page(url, wait=None):
    """Fetch *url* with the headless engine and return the rendered HTML.

    url  -- address of the page to retrieve.
    wait -- optional delay in seconds before the source is captured;
            None captures as soon as the initial load finishes.
    """
    # Qt allows only one QApplication per process, so reuse an existing
    # instance when there is one -- this is what lets get_page() be
    # called several times in a row.
    qt_app = QApplication.instance()
    if qt_app is None:
        qt_app = QApplication(sys.argv)

    engine = SuperEngine(url, wait, qt_app)
    # Snapshot the page source just before the event loop shuts down.
    qt_app.aboutToQuit.connect(engine.save)
    qt_app.exec_()
    return SuperEngine.html

def get_html_for_image_search(image_url, wait=None):
    #
    uri = r'http://images.google.com/searchbyimage?image_url='+image_url
    print "search uri: " + uri
    html_resp=get_page(uri,wait)
    return html_resp

def get_html_for_text_search(input_text=None, wait=None):
    #
    #example: https://www.google.com/search?btnl=1&q=Puma+Women's+Voltaic+3+NM+Running+Shoe+amazon+price
    google_server_url = r'http://www.google.com/search?'

    #conver query string as search engine format (q=w1+w2)

    formated_str = re.sub('\s+', r'+', input_text)
    formated_str = re.sub('\'', r'%27', formated_str)
    
    #TODO: add advanced search arguments from user parameters
    #Currently, I am feeling lucky
    
    query_cmd= r'q=' + formated_str + r'+amazon+price'
    
    uri = google_server_url + query_cmd
    print "search uri: " + uri
    html_resp=get_page(uri,wait)
    return html_resp

# extract the matching image name from an HTML string or an HTML file
def extract_match_name_for_html(html_str=None,html_file=None):
    image_name = "name is unknown"
    #print html_str
    #print html_file

    #make a soup from html string or html file
    if html_str:
        soup = soup4 = BeautifulSoup(html_str)
    elif html_file:
        soup = BeautifulSoup(open(html_file))
    else:
        raise Exception('parameter error')
            
    
    #find the name from html document tree recursively
    for div_soup in soup.find_all ('div'):
        if len(div_soup.contents) >=2:
            #print(div_soup.contents[0].string)
            #print (len(div_soup.contents))
            #print(div_soup.contents)
        
            str_0 = div_soup.contents[0].string
            if type(str_0) == bs4.element.NavigableString:
                #str_0 = str_0.strip(' \n\r\t').strip(u'\xa0')
                str_0 = str_0.strip(' \n\r\t')
                if re.search("Best guess for this image",str_0):
                    print "We find it..."
                    str_1 = div_soup.contents[1].string
                    if type(str_1) == bs4.element.NavigableString:
                        str_1 = str_1.strip(' \n\r\t')
                        #Best guess for this image
                        print str_0
                        #image_name
                        print str_1
                        image_name = str_1
    #return the best guess name, we will return a name list later
    return image_name

# main function to test the module

def main(argv=None):
    if argv is None:
      argv = sys.argv
    print argv
    
    reload(sys) 
    sys.setdefaultencoding('utf-8')
    
    # parse the command line arguments and options
    parser = argparse.ArgumentParser()
    parser.add_argument("-url", "--image_url", help="the url of image will be searched")
    parser.add_argument("-time", "--delay_time", \
		      type=int, choices=xrange(1, 11),\
		      default=2, \
		      help="the delay time to get the full response from outside ")
    args = parser.parse_args()   


    # default value for test purpose
    if args.image_url:
      image_url    =args.image_url
    else:
      image_url = "empty"
	
    delay_time   =args.delay_time

    print "Image address is : " + image_url
    print "Delay time is : " + str(delay_time)

     
    #image_url = r'http://ecx.images-amazon.com/images/I/51XsTd-0xjL._SL135_.jpg'
    #image_url = r'http://ec2-184-169-228-174.us-west-1.compute.amazonaws.com:8000/tempImages/testPic.jpeg'
    #image_url = r'http://ec2-184-169-228-174.us-west-1.compute.amazonaws.com:8000/tempImages/watch.jpg'
    image_url = r'http://ecx.images-amazon.com/images/I/31DNBOWDTbL._SL500_AA300_.jpg'
    #image_url = r'http://l4.zassets.com/images/z/2/2/0/6/3/1/2206312-p-MULTIVIEW.jpg'
    #image_url = r'http://a2.zassets.com/images/z/2/0/1/6/7/7/2016779-p-MULTIVIEW.jpg'
    #image_url = r'http://www.bestbuy.ca/multimedia/Products/500x500/102/10203/10203452.jpg'
    #image_url = r'http://i.ebayimg.com/t/BRASH-Black-Suede-KOSMIC-PLATFORM-PUMP-5-Heel-SZ-11-NIB-/00/s/NDkwWDQ5MA==/$(KGrHqRHJBIE+LfCvYnpBQF+K63MKQ~~60_12.JPG'
    #image_url = r'http://i.ebayimg.com/t/GUCCI-Metallic-Brown-Cruise-Leather-Chain-Straps-Shoulder-Handbag-/00/s/NDgxWDM4NQ==/$T2eC16N,!w0E9szN(mhOBQ9vpTusfQ~~60_12.JPG'


    
##    htmlstr = get_html_for_image_search(image_url, delay_time)
##
##       
##    
##    #print htmlstr
##    out_html = "googleImageSearchResp.html"
##    try:  # open output xml file
##      out_file = open (out_html, 'w')
##	      
##    except Exception,err:
##      print Exception, err, " open ", out_html, "failed!"
##      sys.exit(1)
##    #print out_file htmlstr     
##    out_file.write(htmlstr)
##    out_file.close()
##
##    guess_name = extract_match_name_for_html(html_str=htmlstr,html_file=None)
##    print "Best guess for this image: " + guess_name
    guess_name = extract_match_name_for_html(html_str=None,html_file="googleImageSearchResp.html")
    print "Best guess for this image: " + guess_name
    #extract_match_name_for_html(html_str=None,html_file=None)

    #price_list_html = get_html_for_text_search(input_text=guess_name, wait=delay_time)
    price_list_html = get_html_for_text_search(input_text=guess_name, wait=2)
    out_html = "googlePriceListResp.html"
    try:  # open output xml file
      out_file = open (out_html, 'w')
	      
    except Exception,err:
      print Exception, err, " open ", out_html, "failed!"
      sys.exit(1)
    #print out_file htmlstr     
    out_file.write(price_list_html)
    out_file.close()
    

# 
# end of module

if __name__ == "__main__":
    sys.exit(main())  
