'''
Created on 2009-11-16

@author: Jialai_Zhu
'''

import re
##print sys.path
#workpath = os.path.dirname(os.path.abspath(sys.argv[0]))
#print os.path.abspath(sys.argv[0])
##print sys.path
#print 'xxxxxx',os.getcwd() 
#print os.path.abspath('') 


def node_factory(parent, tag, url):
    """Build the node subclass matching *tag*.

    Known tags: 'node', 'driven', 'script', 'iframe'.  Returns the new
    node (created as a child of *parent*) or None when the tag is unknown
    or the node constructor fails.
    """
    # Imported lazily — presumably to avoid a circular import with the
    # malnode* modules, which subclass `node` from this file.
    import malnodeScript
    import malnodeDriven
    registry = {
               'node':node,
               'driven':malnodeDriven.node_driven,
               'script':malnodeScript.node_script,
               'iframe':node_iframe}
    try:
        cls = registry[tag]
    except KeyError:
        return None  # unknown tag: nothing to build
    try:
        return cls(tag, url, parent)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; best-effort behavior (return None) is preserved.
        return None
    
            
from sgmllib import SGMLParser as oldparser
class UrlExtract(oldparser):
    '''Extracts URLs from a SGMLish document.

    Collects [tag, url] pairs for URL-bearing start tags into self.nodes.
    (The docstring previously sat *after* the class attributes, making it
    an inert string expression instead of the class __doc__.)
    '''
    # Tags whose attributes may carry a URL we care about.
    _urltags = {'script':1, 'iframe':1}
    # Attribute names that hold URLs.
    _urlattrs = {'href':1, 'src':1, 'data':1}

    def reset(self):
        '''Resets SGML parser state and clears the collected node list.'''
        oldparser.reset(self)
        self.nodes = []  # list of [tag, url] pairs found so far

    def handle_data(self, data):
        # Text content is irrelevant; only start tags matter.
        pass

    def finish_starttag(self, tag, attrs):
        '''Records [tag, url] for URL-bearing start tags.'''
        if tag in self._urltags:
            # First matching attribute value (href/src/data) wins.
            urls = [v for k, v in attrs if k in self._urlattrs]
            if urls:
                self.nodes.append([tag, urls[0]])
                          

from urlparse import urljoin         
class node(object):
    """A node in the crawled page/resource tree.

    Each node wraps one URL (resolved against its parent when relative),
    tracks its depth (`level`), and can expand into child nodes by
    parsing the downloaded page for script/iframe URLs.
    """

    # Detects absolute http(s) URLs.  The original pattern
    # '([http|https]://)...' misused a character class (it matched any of
    # 'h','t','p','|','s' before '://'); this anchors a real scheme test.
    _ABSOLUTE_URL = re.compile(r'^(https?)://', re.IGNORECASE)

    def __init__(self, tag, url, parent=None):
        self.level = 1
        self.tag = tag
        self.code = '200'          # HTTP status, optimistic default
        self.url = url
        # Resolve relative URLs against the parent's URL; absolute
        # http(s) URLs and empty strings are kept as-is.
        if url and not self._ABSOLUTE_URL.match(url):
            if parent is not None:
                self.url = urljoin(parent.url, url)

        self.page_dir = self.dir(self.url)
        self.child = []
        self.parent = parent

        if self.parent is not None:
            self.level = parent.level + 1

    def dir(self, _a):
        """Return the 'directory' part of URL *_a* (scheme://host/path
        minus the last path component); falls back to *_a* unchanged when
        it has no scheme."""
        try:
            scheme, base = re.search(r'(.*?)://(.*?)/*[^/]*$', _a).groups()
            return scheme + '://' + base
        except Exception:
            # No '://' in the string (search returned None) — was the
            # Py2-only `except Exception, ex` with an unused `ex`.
            return _a

    def new(self, tag, url, factory=None):
        """Create a child node via *factory* (default: node_factory).

        The original accepted a `factory` argument but ignored it and
        always called node_factory; it is now honored.  The default is
        resolved lazily so callers passing nothing see no change.
        """
        if factory is None:
            factory = node_factory
        return factory(self, tag, url)

    def pe(self):
        """True when the downloaded page starts with the DOS/PE magic
        'MZ'.  Slicing avoids the IndexError the original raised on
        pages shorter than two characters."""
        return self.page[:2] == 'MZ'

    def download(self):
        # self.downloader is expected to be attached by the caller
        # before download() is invoked — TODO confirm against callers.
        self.page = self.downloader.run()

    def get_child(self):
        """Parse self.page and populate self.child with one node per
        URL-bearing tag found; returns self.child.  Stops early at the
        depth limit, on an empty page, or on a PE binary payload."""
        if self.max_level():
            return self.child

        if not self.page:
            return self.child

        if self.pe():
            self.type = 'PE'   # binary executable, do not parse as SGML
            return self.child

        urlget = UrlExtract()
        urlget.feed(self.page)
        urlget.close()
        for tag, url in urlget.nodes:
            self.child.append(self.new(tag, url))
        return self.child

    def max_level(self):
        """True once this node is deeper than the hard depth limit (8)."""
        return self.level > 8

    def toString(self):
        """Render as '<tabs>(level)url(tag)', indented one tab per level."""
        indent = '\t' * self.level
        return '%s(%d)%s(%s)' % (indent, self.level, self.url, self.tag)
            
class node_iframe(node):
    """Node for <iframe> elements; uses the generic child extraction."""
    def get_child(self):
        # Delegate straight to the base-class implementation.
        return super(node_iframe, self).get_child()
    
#from decode import string
#from decode import link
#from decode import unescape
#class node_script(node):
#    def get_child(self):
#        node.get_child(self)
#        try:
#            strings = string.get_str(self.page)
#            if len(link.http1(strings)):
#                print strings
#        except:
#            pass        
#        return self.child
                            

      
 
