import pickle, os, sys, re
import xml.dom.minidom, htmlentitydefs
import nltk

# Category names from data/pages.list whose pages get the full
# sentence-parsing treatment in process().
CATEGORIES = (
  "General",
  "People",
  "Objects",
)

# Categories whose pages are date listings (Events/Births/Deaths
# sections), handled by process_dates().
DATES = (
  "Dates",
)

# Categories whose pages are skipped when processing the whole corpus.
EXCLUDE = (
  "Bad Cases",
  "Finished",
)

def debug(*strings):
  """Print all arguments on one line, space-separated, followed by a newline.

  Relies on the Python 2 print statement, which formats each value itself,
  so non-string and unicode arguments are handled uniformly.
  """
  for s in strings:
    print s,
  print

# Read data/pages.list: "--Category--" header lines, each followed by one
# page name per line.  Builds categories = {category_name: [page, ...]}.
categories = {}
current = None
with open(os.path.join("data", "pages.list"), 'r') as fin:
  for line in fin:
    line = line.strip()
    if not line:
      # Blank lines are separators, not page names.  (The original code
      # appended them to the current category's page list, or raised
      # KeyError when one appeared before the first header.)
      continue
    if line[:2] == '--' and line[-2:] == '--':
      current = line[2:-2]
      if current not in categories:
        categories[current] = []
    elif current is not None:
      categories[current].append(line)

# NLTK data path of the pickled maxent POS tagger (Penn Treebank tagset).
POS_TAGGER = "taggers/maxent_treebank_pos_tagger/english.pickle"

def getDivChildren(node):
  """Return the direct children of *node* that are <div> elements.

  Text nodes and other non-element children (which have no tagName) are
  skipped; the document order of the matching divs is preserved.
  """
  divs = []
  for child in node.childNodes:
    if getattr(child, 'tagName', None) == "div":
      divs.append(child)
  return divs

def stripSups(node):
  """Recursively remove every <sup> element beneath *node*, in place.

  Iterates over a snapshot of childNodes: removing a child while iterating
  the live list shifts the remaining siblings left, so the original version
  skipped the node following each removed <sup> (e.g. the second of two
  adjacent <sup> footnote markers was left in the tree).
  """
  for child in list(node.childNodes):
    if child.nodeName == u'sup':
      node.removeChild(child)
    else:
      stripSups(child)

def chopTail(thing, where):
  """Return `thing` truncated just before the LAST occurrence of `where`.

  Used to drop trailing "Other pages" / "References" sections from page
  text.  If `where` is not found, `thing` is returned unchanged.

  NOTE(review): each scan searches tail[len(where):], so an occurrence that
  starts within the first len(where) characters of the remaining tail (in
  particular, a match at position 0 of `thing`) is never seen — presumably
  harmless for the section headings this is used on, but a quirk to confirm.
  """
  tail = thing
  index = 0
  # Walk forward match by match; `index` accumulates the absolute offset of
  # the most recent match within `thing`, `tail` is trimmed to start at it.
  while where in tail[len(where):]:
    index += tail[len(where):].index(where) + len(where)
    tail = tail[tail[len(where):].index(where) + len(where):]
  if index > 0:
    return thing[:index]
  else:
    return thing

def retag(sentence):
  """Flatten a NE-chunked sentence back into a list of (word, tag) pairs.

  Named-entity subtrees are collapsed into a single token whose tag is
  "ENT-<type>"; bracket tokens are retagged as themselves so the grammar
  can treat them as literal brackets; all other pairs pass through.
  """
  flattened = []
  for element in sentence:
    if isinstance(element, nltk.tree.Tree):
      entity_type = element.node
      entity_words = [word for word, tag in element.leaves()]
      flattened.append((' '.join(entity_words), "ENT-" + entity_type))
    elif element[0] == '(':
      flattened.append(('(', '('))
    elif element[0] == ')':
      flattened.append((')', ')'))
    else:
      flattened.append(element)
  return flattened

# Load the shared NLP machinery once at import time: the pickled maxent POS
# tagger, the Penn Treebank label set, and the feature grammar / chart
# parser used to parse tag sequences.
tagger = nltk.data.load(POS_TAGGER)
labels = tagger.classifier().labels()
# Labels are defined online at:
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
fin = open(os.path.join('data', 'basic.fcfg'), 'r')
grammar = nltk.grammar.parse_fcfg(fin.read())
fin.close()
parser = nltk.parse.earleychart.FeatureEarleyChartParser(grammar)

def strip_matched(start, end, data):
  """Remove every span of *data* running from *start* through the next
  occurrence of *end* (inclusive).

  Non-nesting: each pass deletes from the first remaining *start* marker up
  to and including the first *end* found at or after it.  If a *start* has
  no matching *end* (the original raised ValueError here), everything from
  that marker to the end of the string is dropped.
  """
  while True:
    first = data.find(start)
    if first < 0:
      break
    last = data.find(end, first)
    if last < 0:
      # Unterminated block: drop the tail instead of raising.
      data = data[:first]
      break
    data = data[:first] + data[last + len(end):]
  return data

def get_clean_body(data):
  """Isolate the <body> element of a raw page and pre-clean its markup.

  Scripts, forms and images are stripped out, non-breaking spaces become
  plain spaces, and every <br> variant is normalised to self-closing <br/>
  so the result can be handed to the XML parser.  Raises ValueError when
  the page contains no <body ...> element.
  """
  body_start = data.index("<body ")
  body_end = data.index("</body>")
  data = "<html>" + data[body_start:body_end] + "</body></html>"

  for opener, closer in (("<script", "</script>"),
                         ("<form", "</form>"),
                         ("<img", ">")):
    data = strip_matched(opener, closer, data)

  data = data.replace('&nbsp;', ' ')
  return re.sub('<br[^>]*>', '<br/>', data)

def cleanup_bs(html):
  """Reduce an HTML fragment to plain(ish) ASCII text.

  Block-closing tags become periods (so the sentence tokenizer splits
  there), numeric character references and named entities are decoded and
  re-encoded to ASCII (non-ASCII characters are dropped), and any remaining
  markup is stripped with nltk.util.clean_html().
  """
  # A closing block element ends a "sentence" as far as we are concerned.
  for closer in ('</div>', '</h2>', '</h3>', '</td>', '</p>', '</li>'):
    html = html.replace(closer, '.')

  codepoints = re.findall(r'&#(\d+);', html)
  entities = re.findall(r'&[^;]+;', html)
  # findall with a capture group yields only the digits (e.g. "8217"), so
  # rebuild the full "&#8217;" reference when replacing.  (The original
  # sliced c[2:-1] off the bare digits, raising ValueError on references
  # shorter than 4 digits and decoding the wrong codepoint on longer ones,
  # and then replaced the digit run instead of the reference itself.)
  for c in set(codepoints):
    replacement = unichr(int(c)).encode('ascii', 'ignore')
    html = html.replace('&#' + c + ';', replacement)
  for e in set(entities):
    if e[1:-1] in htmlentitydefs.name2codepoint:
      codepoint = htmlentitydefs.name2codepoint[e[1:-1]]
      replacement = unichr(codepoint).encode('ascii', 'ignore')
      html = html.replace(e, replacement)

  result = nltk.util.clean_html(html)
  result = result.replace('[ edit ]', '.')
  result = result.replace('[ change ]', '.')
  return result

def process(*pages):
  """Parse each raw encyclopedia page and pickle the analysed sentences.

  For every page name: read data/raw/<page>, extract the main content div,
  flatten it to text, sentence-split, POS-tag, NE-chunk and grammar-parse
  it, and dump the result to data/parsed/<page>.pkl as a dict with keys
  title, sentences, listed (extracted date lines), births, deaths.
  """
  for page in pages:
    with open(os.path.join('data', 'raw', page), 'r') as fin:
      data = fin.read()

    # Drop the optional "CATEGORY:<name>" header line on the first line.
    if data[:9] == "CATEGORY:":
      data = data[data.index('\n') + 1:]

    data = get_clean_body(data)

    debug('Parsing:', page)
    doc = xml.dom.minidom.parseString(data)

    title = doc.getElementsByTagName('h1')[0].firstChild.nodeValue
    debug('Title:', title)

    # Article text lives in the first div of the first div under <body>
    # (assumed wiki page layout -- confirm against the raw dumps).
    body = doc.getElementsByTagName('body')[0]
    maindiv = getDivChildren(body)[0]
    content = getDivChildren(maindiv)[0]

    stripSups(content)  # drop footnote/reference markers

    html = content.toxml().encode('ascii', 'ignore')
    doc.unlink()

    result = cleanup_bs(html)
    result = chopTail(result, 'Other pages')
    result = chopTail(result, 'References')
    # Collapse the runs of periods produced by cleanup_bs.
    while '..' in result:
      result = result.replace('..', '.')

    sents = [nltk.tokenize.word_tokenize(s.strip())
             for s in nltk.tokenize.sent_tokenize(result) if len(s) > 3]

    # (useful) HACK: pull "<year>, <event>" and "<year> - <year>, <event>"
    # sentences out into `extracted`.  Built as new lists rather than
    # `del sents[i]` inside `enumerate(sents)` -- the original deletion
    # shifted the list under the iterator, skipping the sentence after
    # every match and deleting the wrong index after the first removal.
    extracted = []
    kept = []
    for s in sents:
      date = None
      event = None
      if len(s) >= 4 and re.match(r"\d+s?", s[0]):
        if s[1] == ',':
          date = { 'when': s[0] }
          event = s[2:]
        elif s[1] == '-' and re.match(r"\d+s?", s[2]) and s[3] == ',':
          date = {
            'start': s[0],
            'end': s[2],
          }
          event = s[4:]
      if date is None:
        kept.append(s)
      else:
        extracted.append((date, event))
    sents = kept

    # Tag/chunk/parse each extracted date line individually.
    dates = []
    for date, event in extracted:
      tagged = tagger.tag(event)
      chunked = nltk.chunk.ne_chunk(tagged)
      nametagged = retag(chunked)
      words = [word for word, tag in nametagged]
      tags = [tag for word, tag in nametagged]
      dates.append({
        'date': date,
        'words': words,
        'tags': tags,
        'tree': parser.parse(tags),
      })
    # /HACK

    # Batch-process the remaining ordinary sentences.
    tagged = tagger.batch_tag(sents)
    chunked = nltk.chunk.batch_ne_chunk(tagged)
    for i, s in enumerate(chunked):
      chunked[i] = retag(s)

    words = []
    tags = []
    for sent in chunked:
      words.append([word for word, tag in sent])
      tags.append([tag for word, tag in sent])

    trees = parser.batch_parse(tags)

    items = []
    for i, sentence in enumerate(words):
      items.append({
        'words': sentence,
        'tags': tags[i],
        'tree': trees[i],
      })

    debug("Total sentences:", len(items))
    debug("Parsed sentences:",
          len([it for it in items if it['tree'] is not None]))

    with open(os.path.join('data', 'parsed', page + '.pkl'), 'w') as fout:
      pickle.dump({
        'title': title,
        'sentences': items,
        'listed': dates,
        'births': None,
        'deaths': None,
      }, fout)

def process_dates(*dates):
  """Parse each raw "date" page and pickle its events, births and deaths.

  Each page is sliced into its Events / Births / Deaths sections using the
  section anchors, each section is flattened to text and analysed, and the
  result goes to data/parsed/<page>.pkl with the same key layout process()
  uses.

  Fixes over the original version:
    * `listed`/`births`/`deaths` output lists are always initialised, so a
      page with a missing section no longer pickles the argument tuple
      under 'listed' or raises NameError for blist/dlist;
    * parenthesised asides really are removed -- the old pattern
      r'([^)]*?)' used GROUPING parens (it matched the empty string); the
      intended pattern is r'\([^)]*?\)';
    * the local `id` no longer shadows the builtin, and empty token lists
      can no longer raise IndexError in the trailing-period trim.
  """
  for page in dates:
    with open(os.path.join('data', 'raw', page), 'r') as fin:
      data = fin.read()

    # Drop the optional "CATEGORY:<name>" header line on the first line.
    if data[:9] == "CATEGORY:":
      data = data[data.index('\n') + 1:]

    data = get_clean_body(data)

    debug('Parsing:', page)
    doc = xml.dom.minidom.parseString(data)

    title = doc.getElementsByTagName('h1')[0].firstChild.nodeValue
    debug('Title:', title)

    body = doc.getElementsByTagName('body')[0]
    maindiv = getDivChildren(body)[0]
    content = getDivChildren(maindiv)[0]

    stripSups(content)

    html = content.toxml().encode('ascii', 'ignore')
    doc.unlink()

    # Locate the section anchors and the start of the footer; -1 means the
    # section is absent.
    i_events = -1
    i_births = -1
    i_deaths = -1
    ilast = -1
    if 'id="Events"' in html:
      i_events = html.index('id="Events"')
    if 'id="Births"' in html:
      i_births = html.index('id="Births"')
    if 'id="Deaths"' in html:
      i_deaths = html.index('id="Deaths"')
    if '<div class="printfooter">' in html:
      ilast = html.index('<div class="printfooter">')
    if '<div class="notice plainlinks">' in html:
      ilast = html.index('<div class="notice plainlinks">')
    # Missing sections collapse onto the footer so the slices stay ordered.
    if i_births < 0:
      i_births = ilast
    if i_deaths < 0:
      i_deaths = ilast
    if i_events >= 0:
      events = html[i_events:i_births]
    else:
      events = ''
    births = html[i_births:i_deaths]
    deaths = html[i_deaths:ilast]

    date = {
      'when': title
    }
    listed = []
    blist = []
    dlist = []

    if events:
      # Trim the heading span and, when another section follows, the next
      # section's opening span.
      if i_births != ilast:
        events = events[events.index('</span>') + 7:events.rindex('<span')]
      else:
        events = events[events.index('</span>') + 7:]

      events = cleanup_bs(events)
      while '..' in events:
        events = events.replace('..', '.')

      ev_sents = [nltk.tokenize.word_tokenize(s.strip())
                  for s in nltk.tokenize.sent_tokenize(events) if len(s) > 3]

      # Tag/chunk/parse each event line, keyed by this page's date.
      for s in ev_sents:
        if len(s) < 3:
          continue
        tagged = tagger.tag(s)
        chunked = nltk.chunk.ne_chunk(tagged)
        nametagged = retag(chunked)
        words = [word for word, tag in nametagged]
        tags = [tag for word, tag in nametagged]
        listed.append({
          'date': date,
          'words': words,
          'tags': tags,
          'tree': parser.parse(tags),
        })

    if births:
      if i_deaths != ilast:
        births = births[births.index('</span>') + 7:births.rindex('<span')]
      else:
        births = births[births.index('</span>') + 7:]

      births = cleanup_bs(births)
      births = re.sub(r'\([^)]*?\)', '', births)  # drop "(...)" asides
      while '..' in births:
        births = births.replace('..', '.')

      br_sents = [nltk.tokenize.word_tokenize(s.strip())
                  for s in nltk.tokenize.sent_tokenize(births) if len(s) > 3]

      for s in br_sents:
        while s and s[-1] == '.':
          s = s[:-1]
        if not s:
          continue
        # Entries look like "Name , description"; split on the first comma.
        if ',' in s:
          comma = s.index(',')
          name = ' '.join(s[:comma])
          desc = ' '.join(s[comma + 1:])
        else:
          name = ' '.join(s)
          desc = None
        blist.append((date, name, desc))

    if deaths:
      deaths = deaths[deaths.index('</span>') + 7:]

      deaths = cleanup_bs(deaths)
      deaths = re.sub(r'\([^)]*?\)', '', deaths)  # drop "(...)" asides
      deaths = chopTail(deaths, 'Other pages')
      deaths = chopTail(deaths, 'References')
      while '..' in deaths:
        deaths = deaths.replace('..', '.')

      dth_sents = [nltk.tokenize.word_tokenize(s.strip())
                   for s in nltk.tokenize.sent_tokenize(deaths) if len(s) > 3]

      for s in dth_sents:
        while s and s[-1] == '.':
          s = s[:-1]
        if not s:
          continue
        if ',' in s:
          comma = s.index(',')
          name = ' '.join(s[:comma])
          desc = ' '.join(s[comma + 1:])
        else:
          name = ' '.join(s)
          desc = None
        dlist.append((date, name, desc))

    with open(os.path.join('data', 'parsed', page + '.pkl'), 'w') as fout:
      pickle.dump({
        'title': title,
        'sentences': [],
        'listed': listed,
        'births': blist,
        'deaths': dlist,
      }, fout)


def get(page):
  """Load and return the pickled parse results for *page*.

  Reads data/parsed/<page>.pkl as written by process()/process_dates().
  NOTE(review): text-mode 'r' only works for protocol-0 pickles on
  Python 2; under Python 3 this would need 'rb' -- confirm before porting.
  """
  path = os.path.join('data', 'parsed', page + '.pkl')
  with open(path, 'r') as fin:
    return pickle.load(fin)

if __name__ == "__main__":
  # Flatten the parsed category listing into the three working sets.
  all_pages = [pg for c in CATEGORIES if c in categories
               for pg in categories[c]]
  date_pages = [pg for c in DATES if c in categories
                for pg in categories[c]]
  exclude = [pg for c in EXCLUDE if c in categories
             for pg in categories[c]]
  if sys.argv[1:]:
    # Explicit page names on the command line: process only those.
    requested = sys.argv[1:]
    process(*[p for p in requested if p in all_pages])
    process_dates(*[p for p in requested if p in date_pages])
  else:
    # No arguments: process every non-hidden raw page that is not excluded.
    raw = [p for p in os.listdir(os.path.join('data', 'raw'))
           if p[0] != '.']
    process(*[p for p in raw
              if p in all_pages and p not in exclude])
    process_dates(*[p for p in raw
                    if p in date_pages and p not in exclude])
