import urllib2
import re
import logging
import time
from waveapi import document
from BeautifulSoup import SoupStrainer, BeautifulSoup

# Module-level configuration constants (upper case by convention).
BASE_URL = 'http://en.wikipedia.org/wiki/Special:Search/'
# Spoofs a browser User-Agent; Wikipedia rejects the default urllib2 agent.
# (Probably shouldn't spoof a web-browser here.)
USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
HEADERS = { 'User-Agent' : USER_AGENT }
NUMPARAS = 2    # Default number of leading paragraphs to quote.
NUMPOINTS = 5   # Default max bullet points per list on disambiguation pages.
BULLET = '-'
OPENTAG = '{{'  # Users wrap a search phrase in OPENTAG ... CLOSETAG.
OPENTAG_LEN = len(OPENTAG)
CLOSETAG = '}}'
CLOSETAG_LEN = len(CLOSETAG)
GREETING = "Enclose a search phrase with %s %s," % (OPENTAG, CLOSETAG) + \
           " and I'll give you the definition from Wikipedia!"
# Compile the regular expressions once at import time.
# This is supposed to help performance, especially since each reg. exp is used
# over and over again. See http://www.python.org/doc/2.5.4/lib/node46.html
RE_TAG = re.compile(OPENTAG + '(.*?)' + CLOSETAG) # Get {{This}} in blip
RE_GET_BRACES = re.compile(OPENTAG + '|' + CLOSETAG) # Match the braces

# Wave annotation dictionaries (annotation name -> value), applied to terms.
BOLD = {"style/fontWeight": "bold"}
ITALIC = {"style/fontStyle": "italic"}
YELLOW_BACKGROUND = {"style/backgroundColor": "rgb(255, 229, 0)"}
LINK = {"link/manual": "http://www.google.com"}

def OnBlipSubmitted(properties, context):
  """Handle a submitted blip: look up each {{term}} and reply with a blip.

  Strips the tag markers from the submitted blip, then appends one new
  blip per search term containing the parsed Wikipedia content.
  """
  blip = context.GetBlipById(properties['blipId'])
  doc = blip.GetDocument()
  text = doc.GetText()
  terms = RE_TAG.findall(text)
  if not terms:
    return
  wavelet = context.GetRootWavelet()
  remove_tags(doc, text, terms)
  for term in terms:
    parsed = SoupParse(wiki_search(term))
    create_blip(wavelet, parsed)

def remove_tags(blip_document, blip_document_contents, search_terms):
  """Remove the open and close tag characters without resetting the entire
  document text, and bold/italicize each tagged term.

  Args:
    blip_document: the wave document to edit in place.
    blip_document_contents: the document text as it stood at submit time.
    search_terms: the phrases that appeared wrapped in OPENTAG/CLOSETAG.
  """
  current_blip_contents = blip_document_contents
  for term in search_terms:
    term_start_index = current_blip_contents.find(OPENTAG + term + CLOSETAG)
    # REMEMBER. By the time the close tag is deleted, the document is already
    # missing the open tag, so the close tag sits immediately after the term:
    # reduce our guess of where it is by the characters already deleted.
    term_end_index = term_start_index + len(term)
    open_tag_range = document.Range(term_start_index,
                                    term_start_index + OPENTAG_LEN)
    close_tag_range = document.Range(term_end_index,
                                     term_end_index + CLOSETAG_LEN)
    blip_document.DeleteRange(open_tag_range)
    blip_document.DeleteRange(close_tag_range)
    annotate_range(blip_document, (term_start_index, term_end_index), BOLD,
                   ITALIC)
    # Mirror the two deletions in our local copy so the next term is located
    # at its post-deletion offset.
    current_blip_contents = (
        current_blip_contents[:term_start_index] +
        current_blip_contents[term_start_index + OPENTAG_LEN:
                              term_end_index + OPENTAG_LEN] +
        current_blip_contents[term_end_index + OPENTAG_LEN + CLOSETAG_LEN:])

def annotate_range(blip_document, range, *annotations):
  """Apply one or more annotations to a character span of the document.

  Args:
    blip_document: the wave document to annotate.
    range: (start, end) tuple of character offsets.
    *annotations: single-entry dicts mapping annotation name to value,
      e.g. BOLD, ITALIC.
  """
  # NOTE: the parameter name shadows the builtin `range`; kept unchanged for
  # backward compatibility with keyword callers.
  # The local no longer shadows this function's own name.
  doc_range = document.Range(*range)
  for annotation in annotations:
    # Each annotation dict is expected to hold exactly one name/value pair.
    name, value = annotation.items()[0]
    blip_document.SetAnnotation(doc_range, name, value)

class SoupParse:
  def __init__(self, raw_html, numparas=NUMPARAS, numpoints=NUMPOINTS,
               img_width=100):
    """Parse raw Wikipedia HTML into blip text plus optional image data.

    Args:
      raw_html: full HTML of a Wikipedia page.
      numparas: number of leading paragraphs to keep.
      numpoints: max bullet points kept per list on disambiguation pages.
      img_width: pixel width requested for the page's first image.

    Sets attributes: blip_text, img, img_width, img_caption.
    """
    # All we are interested in is the bodyContent div; strain the rest out.
    body_content = SoupStrainer('div', id="bodyContent")
    wsoup = BeautifulSoup(raw_html, parseOnlyThese=body_content,
                          smartQuotesTo=None)

    # BUG FIX: honour the numparas parameter instead of always using the
    # module-level NUMPARAS constant.
    first_paragraphs = wsoup.div.findAll('p', limit=numparas, recursive=False)
    blip_text = "\n\n".join(map(html_to_string, first_paragraphs))

    # Heuristic: disambiguation pages usually contain the phrase "refer to".
    if "refer to" in blip_text:
      # This is a disambiguation page: gather all the p and ul tags.
      # Need to get h2, h3 tags also, because that is how some pages are
      # laid out (not yet handled).
      all_p_and_ul = wsoup.div.findAll({'p': True, 'ul': True},
                                       recursive=False)
      blip_text = ""
      for t in all_p_and_ul:
        if t.name == 'p':
          blip_text += "%s\n\n" % html_to_string(t)
        if t.name == 'ul':
          # BUG FIX: honour the numpoints parameter (was NUMPOINTS).
          blip_text += "-%s\n\n" % "\n-".join(
              map(html_to_string, t.findAll('li', limit=numpoints)))
    self.blip_text = blip_text
    self.img = None
    self.img_width = img_width
    self.img_caption = ""
    # First reasonably-sized image; width > 50px filters out icons/bullets.
    # NOTE(review): assumes every img tag carries a 'width' attribute —
    # a width-less img would raise KeyError here. TODO confirm.
    first_image = wsoup.div.find(
        lambda tag: tag.name == 'img' and int(tag['width']) > 50)
    if first_image:
      src = first_image['src']
      # Rewrite the thumbnail URL's "<N>px" segment to the requested width.
      self.img = re.sub(r'/\d+?px', '/%dpx' % img_width, src)
      self.img_caption = first_image['alt']
      

def OnRobotAdded(properties, context):
  """Invoked when the robot has been added: post the greeting blip."""
  greeting_blip = context.GetRootWavelet().CreateBlip()
  greeting_blip.GetDocument().SetText(GREETING)

def wiki_search(phrase):
  """Fetch raw HTML from Wikipedia's search page for the given phrase.

  Spaces become underscores, Wikipedia's usual URL convention.
  Returns the response body as a byte string.
  """
  # It might make sense to do a little caching here.  Once you get a
  # definition from wikipedia, store what you send to the agent in the app
  # engine data store.
  search_url = BASE_URL + phrase.replace(' ', '_')
  request = urllib2.Request(search_url, None, HEADERS)
  response = urllib2.urlopen(request)
  # FIX: close the response so the connection is not leaked.
  try:
    return response.read()
  finally:
    response.close()

def create_blip(parent_wavelet, Blip_Contents):
  """Append a new blip holding the parsed Wikipedia text and, if present,
  its image."""
  text = Blip_Contents.blip_text
  if not text:
    return
  new_blip = parent_wavelet.CreateBlip()
  logging.debug(text)
  new_document = new_blip.GetDocument()
  new_document.SetText(text)
  if Blip_Contents.img:
    image_element = document.Image(Blip_Contents.img,
                                   width=Blip_Contents.img_width,
                                   caption=Blip_Contents.img_caption)
    new_document.InsertElement(0, image_element)

def html_to_string(t):
  """Recursively flatten a BeautifulSoup tag into all the text nested
  within it."""
  text = t.string  # NavigableString when the tag wraps a single text node.
  if text is None:
    # Branch node: concatenate the flattened text of every child tag.
    return "".join(html_to_string(child) for child in t)
  return unicode(text).encode('utf-8', 'ignore')
