"""
The Knowledge Ferret - Document Explorer
Copyright (C) 2010 David R. Pratten

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

"""
from xml.dom import minidom                                          
import MySQLdb
from operator import concat
import re
import sys
from kfconfig import *

"""
- stop the mouse-over terms function from searching in image data, inside tags, or nested one inside another.

- instead of a homebrew search for mismatches, use query expansion of contextually close words >= 4 chars
- handle asides
- create dictionary entries
- add in CSS linked to / embedded in the source
- extract parameters from source file
- add a donotxref class to sections, e.g. a section recapping the main points.
"""
"""
These parameters need to be extracted from the source file
-- beta flag.

"""

# Deepest <section> nesting level that gets its own pages row; sections
# nested deeper than this are flattened into their parent leaf (see outline2).
maxsectionlevel = 3
"""
End
"""

# Shared MySQL connection; credentials (kfhost, kfuser, kfpasswd, kfdb) come
# from kfconfig (star-imported above).
db=MySQLdb.connect(host=kfhost, user=kfuser, passwd=kfpasswd, db=kfdb)
# Separate cursors so nested queries (c1) can run while iterating rows on c.
# c2 is currently referenced only by commented-out code.
c=db.cursor()
c1=db.cursor()
c2=db.cursor()

def cull_elements(parent,tagname, classname, culldoc):
  """Detach every matching element under parent and append it to culldoc.

  Walks the tree depth-first.  An element matches when its tag name equals
  tagname and, if classname is non-empty, its class attribute equals
  classname.  Matching elements are removed from their parent and appended
  under culldoc.firstChild; non-matching nodes are recursed into.

  Fix: iterate over a snapshot of childNodes.  minidom's childNodes is a
  live list, so removing a child while iterating it directly caused the
  sibling immediately after each removed element to be skipped (neither
  culled nor recursed into).
  """
  for subnode in list(parent.childNodes):
    if subnode.nodeType == subnode.ELEMENT_NODE and subnode.tagName == tagname and (classname=="" or subnode.getAttribute("class")==classname):
      culldoc.firstChild.appendChild(parent.removeChild(subnode))
    else:
      cull_elements(subnode, tagname, classname, culldoc)

def getFirstChildElement(node):
  """Return node's first child of type ELEMENT_NODE, or False if none exists."""
  for candidate in node.childNodes:
    if candidate.nodeType == candidate.ELEMENT_NODE:
      return candidate
  # No element children (text/comment-only or empty node).
  return False
  
"""
CHANGE: outliner/extractor 
Every outline consists of a <section> containing a heading element and a sequence of sections.
if a node contains ANY <section> elements IT MUST match that pattern or bomb.
"""
def outline2(section, headertext,level, doc_id,parent, ancestors, ancestor_titles, docname):
  firstChildElement = getFirstChildElement(section)
  if not firstChildElement or firstChildElement.localName == "section" :
    print "nodeType %d localName %s content '%s'" % (firstChildElement.nodeType, firstChildElement.localName, firstChildElement.toxml())
    print "Section without header"
    #print '%s' % section.toxml()
    exit(3)
  level += 1
  #print '<li>%s' % section.firstChild.firstChild.toxml()
  foundsection = False
  i = 0
  headersofar = headertext
  if firstChildElement.localName not in ('hgroup','header','h1','h2','h3','h4','h5','h6','p'):
      print "Heading must be one of 'hgroup','header'.'h1','h2','h3','h4','h5','h6','p'"
      print '%s' % firstChildElement.toxml()
      exit(23)
  if firstChildElement.localName == 'p':
    runoftext = 1
  else:
    runoftext = 0
  if len(section.getElementsByTagName('section')) and level <= maxsectionlevel: #has subsections and not too deep
    thisheader = firstChildElement.toxml()
    headersofar = '%s%s' % (headersofar,thisheader)
    #print firstChildElement.localName
    parent = c.execute("""INSERT INTO pages (level,title,content,leaf, document_id, parentpage_id, ancestors, ancestor_titles, permalink, runoftext) values (%s, %s, %s, false, %s, %s, %s, %s, if(%s<>'',%s,NULL),%s);""", (level, firstChildElement.firstChild.toxml(), '' ,doc_id,parent,', '.join([str(a) for a in ancestors]),', '.join([a for a in ancestor_titles]),section.getAttribute('id'),'%s-%s' % (docname, section.getAttribute('id')),runoftext))
    new_parent = db.insert_id()
    new_ancestors = concat(ancestors, (new_parent,) )
    new_ancestor_titles = concat(ancestor_titles, (firstChildElement.firstChild.toxml(),) )
    for n in section.childNodes:
  #    print "nodeType %d localName %s content '%s'" % (n.nodeType, n.localName, n.toxml())
      if n.nodeType == n.ELEMENT_NODE:
        i += 1
      if i == 1 and n.nodeType == n.ELEMENT_NODE and n.localName == "section":
        # missing header
        print "Missing Heading"
        #print '%s' % n.toxml()
        exit(1)
      if i <> 1 and n.nodeType == n.ELEMENT_NODE and n.localName <> "section":
        # missing once started only sections allowed
        print "After heading, only sections  allowed"
        print "nodeType %d localName %s content '%s'" % (n.nodeType, n.localName, n.toxml())
        #print '%s' % n.toxml()
        exit(2)
      if n.nodeType == n.ELEMENT_NODE and n.localName == "section":
        #print '<ul>'
        #print "nodeType %d localName %s content '%s'" % (n.nodeType, n.localName, n.toxml())
        #print '<li>%s' % n.firstChild.firstChild.toxml()
        headersofar = outline2(n, headersofar, level, doc_id,new_parent, new_ancestors, new_ancestor_titles, docname)
        #print '</ul>'
  else: #this section did not contain any subsections
    # replace in-document links with external links
    for a in section.getElementsByTagName('a'):
      m = re.match(r"^#(.+)$", a.getAttribute("href"))
      if m <> None:
        a.setAttribute("href","?d=%s&a=%s"%(doc_id,m.group(1)))
    # output a visible section into the database that includes accumulated and not yet output headers
    content = '%s%s' % (headersofar, ''.join([n.toxml() for n in section.childNodes]))
    #print firstChildElement.toxml()
    c.execute("""INSERT INTO pages (level,title,content, leaf, document_id, parentpage_id, ancestors, ancestor_titles, permalink,runoftext) values (%s, %s, %s, true, %s, %s, %s, %s,  if(%s<>'',%s,NULL),%s);""", (level, firstChildElement.firstChild.toxml(), content, doc_id,parent, ', '.join([str(a) for a in ancestors]), ', '.join([a for a in ancestor_titles]),section.getAttribute('id'),'%s-%s' % (docname, section.getAttribute('id')),runoftext))
    page_id = db.insert_id()
    #print record anchors for this page
    for a in section.getElementsByTagName('a'):
      if a.getAttribute("id")<>"":
        c.execute("""INSERT INTO anchors (document_id, page_id, anchor) values (%s, %s, %s);""", (doc_id, page_id, a.getAttribute("id")))
    #print db.insert_id()
    headersofar = ''
  #print '</li>'
  return headersofar

"""
    print 'AAAAAAAAAAAAAAA%s' % (headersofar)
    for n in section.childNodes:
      print n.toxml()
    print 'ZZZZZZZZZZZZZZZ' 
"""

""" Load one or more documents from a source xml file """
def outline(sourcexml):
  """Load every <article> element found in sourcexml into the database.

  For each article carrying an id attribute: find or create its documents
  row, delete its previous pages/definitions/terms/anchors, pull out <style>
  blocks and the documentmeta <div>, store the section outline via outline2,
  compute related-page links, permalinks, next/previous leaf navigation, and
  collect <dl> term definitions.  Articles without an id are reported and
  skipped.
  """
  for n in sourcexml.getElementsByTagName('article'): # load one or more articles from the source file
    if n.getAttribute('id'): # each attribute must have an id to uniquely identify it.
      docname = n.getAttribute('id')  
      
      print "select insert find this document's id"
      c.execute("""select id from documents where title = %s;""",(docname,))
      row = c.fetchone()
      if not row:
        # First time this document is seen: create it (permalink = title).
        c.execute("""insert into documents (title, permalink) values (%s,%s);""", (docname,docname))
        doc_id = db.insert_id()
      else:
        doc_id, = row
        
      print "remove this document from the page, definitions and terms tables"
      # Reload is destructive: wipe all derived rows for this document first.
      c.execute("""delete from pages where document_id = %s;""", (doc_id,))
      c.execute("""delete from definitions using definitions, terms where definitions.id = terms.definition_id and document_id = %s;""", (doc_id,))
      c.execute("""delete from terms where document_id = %s;""", (doc_id,))
      c.execute("""delete from anchors where document_id = %s;""", (doc_id,))
      
      print "extract style information from this document"
      # Detach every <style> element into a scratch document so it never
      # lands inside page content; stored on the documents row below.
      style = minidom.parseString('<Styles/>')
      cull_elements(n,"style", "", style)
      
      print "extract semantic markup"
      # The <div class="documentmeta"> holds document-level metadata tags.
      meta = minidom.parseString('<meta/>');
      cull_elements(n,"div", "documentmeta",meta)
      # NOTE(review): raises IndexError/AttributeError when any of these
      # three tags is missing or empty -- presumably source files are
      # trusted to always supply them; confirm.
      canonicalurl = meta.getElementsByTagName("canonicalurl")[0].firstChild.toxml()
      copyrightby = meta.getElementsByTagName("copyrightby")[0].firstChild.toxml()
      documenttitle = meta.getElementsByTagName("documenttitle")[0].firstChild.toxml()

      c.execute("""update documents set style = %s, canonicalurl = %s, copyrightby = %s, documenttitle = %s where id = %s;""", 
        (''.join([stylenode.toxml() for stylenode in style.firstChild.getElementsByTagName("style")]),
        canonicalurl,
        copyrightby,
        documenttitle,
        doc_id))
      
      print  "extract the content of this document"
      # Root call: level 0, parent page 0, sentinel ancestors/titles.
      outline2(n,'',0,doc_id,0,(0,),('',),docname)
      
      print "create the similarity scores"
      # Iterate pages on cursor c while running lookups/updates on c1 --
      # this is why separate cursors exist on the shared connection.
      c.execute("""select id,content,parentpage_id from pages where document_id = %s;""", (doc_id,))
      for row in c:
        page_id,content, parent = row
        # Strip markup/punctuation down to words for the fulltext match.
        words = ' '.join(re.split('\W+', content))
        c1.execute("""SELECT group_concat(id ORDER BY id SEPARATOR ', ')
          FROM (select id from pages WHERE id<>%s and not runoftext and parentpage_id<>%s AND MATCH (content) AGAINST (%s) LIMIT 5) r;""", (page_id,parent,words))
        row1 = c1.fetchone()
        related, = row1
        c1.execute("""update pages set related = %s where id = %s""",(related,page_id))
        
      #
      print "update the permalinks in pages"
      # Pages with no explicit id attribute get a content-derived permalink.
      c.execute("""update pages set permalink = md5(concat(title,ancestor_titles)) where permalink is null;""")

      print "update the next and previous pages"
      # Temp copy avoids MySQL's restriction on reading the table being updated.
      c.execute("""CREATE TEMPORARY TABLE pagex SELECT id, permalink, document_id, runoftext FROM pages;""")
      c.execute("""UPDATE pages SET nextleaf = COALESCE((SELECT p2.permalink FROM pagex p2 WHERE not p2.runoftext AND p2.id > pages.id AND p2.document_id = %s ORDER BY id LIMIT 1),'')  ;""", (doc_id,))
      c.execute("""UPDATE pages SET prevleaf = COALESCE((SELECT p2.permalink FROM pagex p2 WHERE not p2.runoftext AND p2.id < pages.id AND p2.document_id = %s ORDER BY id DESC LIMIT 1),'')  ;""", (doc_id,))
      c.execute("""DROP TEMPORARY TABLE IF EXISTS pagex;""")


      print "update permalinks to point to the new page"
      # NOTE(review): no document_id filter here -- upserts permalinks for
      # every page in the table, not just this document; confirm intended.
      c.execute("""insert into permalinks (id, popularity, page_id) select permalink, 0, id from pages p on duplicate key update page_id = p.id ;""")
      #
      print "collect the definitions"
      # NOTE(review): scans the whole source file (and rebinds the outer
      # loop variable n), so with multiple articles every article is
      # credited with every <dl>'s definitions -- confirm intended.
      for n in sourcexml.getElementsByTagName('dl'):
        dt = None
        dd = None
        for n1 in n.childNodes:
          if n1.nodeType == n1.ELEMENT_NODE and n1.localName == "dt":
            dt = n1.firstChild.toxml()
            dd = None # dt must precede dd
          if n1.nodeType == n1.ELEMENT_NODE and n1.localName == "dd":
            dd = n1.firstChild.toxml()
          # Emit one term/definition pair once both halves have been seen.
          if dt <> None and dd <> None:
            c.execute("""insert into definitions (definition) values (%s);""", (' '.join(re.split('[\W-]+', dd)),))
            definition_id = db.insert_id()
            c.execute("""insert into terms (document_id, term, definition_id) values (%s,%s, %s);""", (doc_id,' '.join(re.split('[\W-]+', dt)),definition_id))
            dt = None
            dd = None
      # 
      print "put spans around the dt s in the document."
      #c.execute("""select term, definition from terms t, definitions d where t.definition_id = d.id and document_id = %s order by length(term) desc  ;""", (doc_id,))
      #c1.execute("""select id,content from pages where document_id = %s;""", (doc_id,))
      #for row1 in c1:
      #  page_id, content, = row1
      #  new_content = content
      #  for row in c:
      #    term,definition = row
      #    filtereddefinition = ' '.join(re.split('[\W\.\;\:\,]+', definition))
      #    new_content = re.sub(term, '<span class="dt" title="%s">%s</span>' % (filtereddefinition, term), new_content)
      #  c2.execute("""update pages set content = %s WHERE id=%s;""", (new_content, page_id,))

    else:
      print 'Article does not have an id'  

# Script entry point: parse the source XML file named on the command line
# and load its articles into the database.
xmldoc = minidom.parse(sys.argv[1]) # use the first command line parameter as the file name.
# populate the outline
outline(xmldoc)  

#print xmldoc.toxml()  