import urllib2
import urllib
import simplejson as json

from BeautifulSoup import BeautifulSoup, SoupStrainer

import ParseHelpUtils

#===================================================================

def lookup_google(place):
    """Geocode *place* using the Google Maps geocoding web API.

    Args:
        place: free-text location string; falsy values short-circuit.

    Returns:
        A dict with the first result's geometry location (presumably
        'lat'/'lng' keys -- determined by Google's response schema),
        or an empty dict when *place* is falsy or Google's status is
        not "OK".

    Raises:
        urllib2.URLError: when the HTTP request fails.
        KeyError: when the response JSON lacks an expected key.
    """
    if not place:
        return {}

    base_url = "http://maps.google.com/maps/api/geocode/json?address="
    geocode_url = base_url + urllib.quote(place) + "&sensor=false"

    try:
        contents = get_content(geocode_url)
        google_response = json.loads(contents, encoding="utf-8")

        if google_response['status'] != "OK":
            return {}

    except (urllib2.URLError, KeyError):
        # Bare re-raise preserves the original exception instance and
        # traceback.  The old code raised the exception *class* instead:
        # `raise urllib2.URLError` fails outright (URLError requires a
        # `reason` argument) and the KeyError handler tried to unpack
        # KeyError args into (errno, strerror), which KeyError never has.
        raise

    results = google_response['results']

    # Google returns multiple candidate results with far more fields
    # than we use; taking the first match is a simplification, not a
    # ranking decision.
    location = results[0]['geometry']['location']  # A dict

    return location

#===================================================================

def is_proxy_enabled():
    """Feature flag: True when outbound HTTP must go through the proxy.

    Flip the flag by hand when running outside the proxied network.
    """
    proxy_flag = True
    return proxy_flag

#===================================================================

def get_content(url):
    """Fetch *url* with a 'today_bot' User-Agent and return the body
    decoded as UTF-8 text.

    When the proxy flag is on, installs a process-wide urllib2 opener
    that authenticates against the corporate HTTP proxy before issuing
    the request (this affects every subsequent urllib2 call, not just
    this one).

    Raises:
        urllib2.URLError: on network failure.
        UnicodeDecodeError: when the body is not valid UTF-8.
    """
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'today_bot')

    if is_proxy_enabled():
        # SECURITY: proxy credentials are hard-coded in source control.
        # They should be loaded from the environment or a config file
        # kept out of version control; rotate this password.
        proxy_info = {
            'user': '10332',
            'pass': '2011June123*',
            'host': 'cwybcproxy.us.dnb.com',
            'port': 8080,
        }

        # Build a new opener that uses a proxy requiring authorization
        # and install it globally.
        proxy_url = "http://%(user)s:%(pass)s@%(host)s:%(port)d" % proxy_info
        proxy_support = urllib2.ProxyHandler({"http": proxy_url})
        opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)

    # Close the response even if read() raises (the original leaked the
    # socket on every call).
    r = urllib2.urlopen(req)
    try:
        contents = r.read()
    finally:
        r.close()

    return contents.decode("utf-8")


#===================================================================
def extract_events(wikipedia_html):
    """Extract the plain-text "Events" entries from a Wikipedia day page.

    Parses only the <ul> elements of *wikipedia_html*, takes the second
    one (which holds the Events list on these pages), strips the markup
    with ParseHelpUtils.text_of, and returns one entry per line.
    """
    only_uls = SoupStrainer('ul')  # restrict parsing to <ul> tags
    ul_tags = list(BeautifulSoup(wikipedia_html, parseOnlyThese=only_uls))

    # NOTE: hard-coded index 1 breaks for the Jan 1 page. fix it
    raw_lines = str(ul_tags[1]).split("\n")
    decoded_html = "\n".join(raw_lines).decode("utf-8")

    return ParseHelpUtils.text_of(decoded_html).splitlines()

#===================================================================

def extract_births(wikipedia_html):
    """Extract the plain-text "Births" entries from a Wikipedia day page.

    Parses only the <ul> elements of *wikipedia_html*, takes the third
    one (which holds the Births list on these pages), strips the markup
    with ParseHelpUtils.text_of, and returns one entry per line.
    """
    only_uls = SoupStrainer('ul')  # restrict parsing to <ul> tags
    ul_tags = list(BeautifulSoup(wikipedia_html, parseOnlyThese=only_uls))

    # NOTE: hard-coded index 2 breaks for the Jan 1 page. fix it
    raw_lines = str(ul_tags[2]).split("\n")
    decoded_html = "\n".join(raw_lines).decode("utf-8")

    return ParseHelpUtils.text_of(decoded_html).splitlines()
#===================================================================
