# -*- coding: utf-8 -*-
"""
Common parse routines for RDFW using beautiful soup...
"""

import urllib2
from BeautifulSoup import BeautifulSoup, NavigableString, Tag, Comment
from copy import deepcopy

def sequenceATags(soup, sequence_tags=None, tagId='rdfw_seq'):
    """
    Stamp each anchor tag of interest with its document-order position.

    For some rdfw operations it's important to know the relative position in
    the source HTML of the rdfw tags.  This walks every <a> element in *soup*
    (in document order) and, when its href is one of *sequence_tags*, sets
    the attribute named *tagId* to an increasing counter starting at 0.

    soup          -- BeautifulSoup tag/document, annotated in place
    sequence_tags -- iterable of href values to mark; None or empty marks
                     nothing (the original code raised TypeError on the
                     default None because of `href in None`)
    tagId         -- name of the attribute used to store the sequence number
    """
    if not sequence_tags:
        return  # nothing to look for; also avoids 'in None' TypeError
    wanted = set(sequence_tags)  # O(1) membership test per anchor
    seq = 0
    for element in soup('a'):
        if element.get('href') in wanted:
            element[tagId] = seq
            seq += 1


def findCommonAncestor(soup, startRef, stopRef):
    """
    Given two href values and a BeautifulSoup context, find the closest
    common ancestor in which the start anchor precedes the stop anchor.
    Used by RDFW to find a sensible starting place for parsing RDFW content.

    Note that:
    1) the order is important, the start tag must precede the stop tag - IMPLEMENTED
    2) there can only be a single instance of start and stop tag in the context - NOT IMPLEMENTED

    Returns the qualifying ancestor tag, or the tree root if we run out of
    parents without finding one.
    Raises ValueError if no <a href=startRef> exists in the context.
    """
    # Position ourselves on the start anchor itself before climbing.
    if soup.get('href') != startRef:
        soup = soup.find('a', href=startRef)
        if not soup:
            # string exceptions are illegal since Python 2.6; raise a real one
            raise ValueError('tag {%s} was not in this context' % startRef)

    while True:
        if soup.parent:
            soup = soup.parent
        else:
            return soup  # reached the root of the tree
        # Stamp every start/stop anchor under this candidate ancestor with
        # its document-order position so we can compare ordering below.
        sequenceATags(soup, sequence_tags=[startRef, stopRef], tagId='rdfw_seq')

        start = soup.find('a', href=startRef)
        if start:
            for stop in soup.findAll('a', href=stopRef):
                # accept only if some stop anchor appears *after* the start
                if int(stop.get('rdfw_seq')) > int(start.get('rdfw_seq')):
                    return soup

                
def findFirstAncestor(context, types=None):
    """
    Walk up the parse tree from *context* until a tag whose name is in
    *types* is found; the BeautifulSoup document root ('[document]') is
    always accepted as a fallback so the ascent terminates there.

    context -- a BeautifulSoup tag (anything exposing .name and .parent)
    types   -- list of tag names that terminate the ascent (e.g.
               ['body', 'li']); defaults to the root-only fallback

    Returns the first matching ancestor (possibly *context* itself).
    Raises ValueError if we fall off the top of the tree before matching.
    """
    # Build a fresh list every call.  The original signature `types=[]`
    # plus `types.append('[document]')` mutated the shared default (and any
    # caller-supplied list), accumulating '[document]' entries across calls.
    accepted = list(types or []) + ['[document]']
    try:
        while context.name not in accepted:
            context = context.parent
        return context
    except AttributeError:
        # context became None: we climbed past the root without a match.
        raise ValueError(
            'tag from %s was found not in this context' % ','.join(accepted))


def parseLit(whereabouts, startRef, stopRef):
        """
        Cut out and return the HTML fragment delimited by two rdfw anchors.

        whereabouts -- BeautifulSoup tag/fragment containing both anchors
        startRef    -- href value of the anchor that opens the literal
        stopRef     -- href value of the anchor that closes the literal

        Returns the closest common ancestor of the two anchors, with every
        node that mark_keepers() flagged keepme='0' (including the two
        anchor tags themselves) extracted, and the temporary 'keepme'
        bookkeeping attributes removed again.
        Raises (via findCommonAncestor) if the start anchor is absent.
        """
        #find within all the HTML soup
        #findFirstAncestor(whereabouts,['body','li'])
        #context = deepcopy(whereabouts)
        # Re-parse a private copy so the destructive extract() calls below
        # cannot mutate the caller's soup.  NOTE(review): str() on a BS3 tag
        # may mangle non-ASCII markup -- confirm inputs are ASCII-safe.
        context = BeautifulSoup(str(whereabouts))
        #context = whereabouts
        #logging.debug('Instance.parseInstance: parsing XMLLit tag: %s' % tag ) 

        #findFirstAncestor(whereabouts, ['ul','body']).renderContents()
        #context = BeautifulSoup(whereabouts.findParents()[-1].renderContents())

        ancestor = findCommonAncestor(context, startRef, stopRef)
        # Flag keepers/losers; mark_keepers drives the module-level __s flag.
        mark_keepers(ancestor.contents, startRef, stopRef)
        #print 'losers'
        starter = ancestor.find('a', href=startRef)
        #make sure the parents of our parsed content don't get extracted
        # ([:-1] drops the '[document]' root, which takes no attributes)
        for p in starter.findParents()[:-1]:
            p['keepme'] = '1'
        #Blow away the rest...
        #assert(len(whereabouts.findAll())==len(context.findAll()))
        for loser in ancestor.findAll(keepme='0'):
            loser.extract()
        #assert(len(whereabouts.findAll()) != len(context.findAll()))
        #now cleanup the 'keepme' values
        # presumably ancestor was stamped keepme='1' by the findParents loop
        # above (it is an ancestor of starter) -- TODO confirm it can never
        # be the '[document]' root here, or this del would fail
        del(ancestor['keepme'])
        for itm in ancestor.findAll(keepme='1'):
            del(itm['keepme'])


        return ancestor    
            
# Module-level parse state: True while the traversal below is between the
# start and stop anchors.  Shared by every (recursive) mark_keepers call.
__s = False
def mark_keepers(context, startRef, stopRef):
    """
    Recursively flag which nodes survive parseLit()'s pruning pass.

    Walks the BeautifulSoup nodes in *context* (a .contents list):
      * HTML comments are extract()ed (deleted) outright.
      * The start/stop anchors themselves get keepme='0' (discard) and
        toggle the module-level __s flag on/off respectively.
      * Every other tag gets keepme='1' while inside the start..stop span,
        keepme='0' outside it, and its children are recursed into.
      * Plain text nodes carry no attributes and are left untouched.
    parseLit() later extracts every node marked keepme='0'.
    """
    global __s

    for itm in context:
        # Exact type checks are deliberate: Comment subclasses
        # NavigableString in BeautifulSoup 3, so isinstance() would swallow
        # comments in the first branch.
        if type(itm) == NavigableString:
            pass  # nothing to mark on bare text
        elif type(itm) == Comment:
            itm.extract()
        elif itm.get('href') == startRef:
            __s = True
            itm['keepme'] = '0'
        elif itm.get('href') == stopRef:
            __s = False
            itm['keepme'] = '0'
        else:
            itm['keepme'] = '1' if __s else '0'
            if len(itm) > 0:  # BS3 Tag.__len__ == number of children
                mark_keepers(itm.contents, startRef, stopRef)
                
"""
def parseLiteralProperty(context, startRef, stopRef):
    started = stopped = starting = stopping = False
    result = []
    extractions = []
    
    frag = findCommonAncestor(context, startRef, stopRef)
    
    -""
    print 'frag'
    print frag
    print
    print 'pretty'
    print BeautifulSoup(frag.prettify())
    return ""
    -""
    
    for itm in frag:
        #print '*'
        if type(itm) == NavigableString:
            print '.', 
            if started:
                if itm not in ('\n',u'\n','\t', u'\t' ):
                    print 'adding %s |' % itm
                    result.append(itm.strip()) 
            continue
        elif type(itm) == Comment:
            continue
        else:
            try:
                if (itm.get('href') == startRef):
                    starting = itm
                    started = True
                    extractions.append(itm)
                else:
                    starting = itm.find('a',href=startRef)
                #print 'itm:', itm
                #print
                if starting: print 'starting...'
                if started:
                    if itm.get('href') == stopRef:
                        stopping = itm
                        stopped = True
                        extractions.append(itm)
                    else:
                        stopping = itm.find('a',href=stopRef)# or 
                    if stopping: print 'stopping...'
            except Exception, e:
                print 'arrggh', itm
                print e
        #print starting, stopping
        #Remove nodes up to and including startnode
        if starting and not started:
            ###print '123 ', itm
            for i in itm:
                print 'starting at ....' , i
                if i==starting:
                    #print 'found start...'
                    started = True
                    extractions.append(i)
                elif not started:
                    extractions.append(i)
            #continue
 
        #Remove nodes after and including stopnode
        if stopping and not stopped:
            #print 'stopping at....', itm
            for i in itm:
                #print i
                if i==stopping:
                    #print 'found end...'
                    stopped = True
                    extractions.append(i)
                elif stopped:
                    extractions.append(i)                   

            #continue
        if started:
            #if itm not in ('\n',u'\n','\t', u'\t' ):
                result.append(itm) 
            
        if stopped:
            break
    
    print extractions
    for ex in extractions:
        try:
            result.remove(ex) 
        except ValueError, err:
            continue


    print 'output...' 
    print result
    value = ''.join([str(x) for x in result])
    #print value
    #print 'end output'     
    return value
"""