'''
Created on Apr 15, 2011

@author: Jialai_Zhu
'''
import re
import os
import mechanize
from mechanize import UserAgentBase
# User-Agent strings for impersonating common browsers.
# Top most popular browsers in my access.log on 2009.02.12:
#   tail -50000 access.log |
#    awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
#    sort -rn |
#    head -20
BROWSERS = (
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.5) Gecko/2008121621 Ubuntu/8.04 (hardy) Firefox/3.0.5',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-us) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/3.2.1 Safari/525.27.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
)
   
class BrowserError(Exception):
    """Raised when fetching a page fails.

    Attributes:
        url:   the URL whose fetch failed.
        error: human-readable error description.
        code:  numeric error/status code (None when unknown).
    """
    def __init__(self, url, error, code=None):
        # BUG FIX: 'code' now defaults to None - one call site passed only
        # two arguments, which previously raised TypeError instead of this
        # exception. Passing a message to Exception makes str(e) / 'print e'
        # informative (it was empty before).
        Exception.__init__(self, '%s: %s' % (url, error))
        self.url = url
        self.error = error
        self.code = code
class page_core():
    def set_level(self,level,maxlevel):
        self.maxlevel=maxlevel
        self.level=level
        pass
    def is_sucess(self):
        if self.BrowserError!=None:
            return False
        return True
    def allfilter(self,link):return True
    def htmlfilter(self):
        return True
    def open(self):
        if self.level==0:
            self.resp=self.browser().open(self.url)
        else:
            self.resp=self.browser().open(self.request)
        self.url=self.resp.geturl()
    def getresp(self):
        import urllib2
        import socket
        try:            
            print self.level*' '+'-',self.level,self.url,self.type
            self.open()
        except (urllib2.HTTPError, urllib2.URLError), e:
            try:code=e.reason.args[0]
            except:
                code=e.code
            raise BrowserError(self.url, str(e),code)
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(self.url, msg)
        except socket.timeout, e:
            raise BrowserError(self.url, "timeout",30000)
        except KeyboardInterrupt:
            raise
        except:
            raise BrowserError(self.url, "unknown error",20000)
        if self.htmlfilter()==False:
            raise
    def html(self):
        try:
            return self.body
        except:
            self.body=self.resp.read()
        return self.body
    
    def httpcode(self):
        try:
            return self.resp.code
        except:
            return self.BrowserError.code
    def browser(self):
        try:
            return self.br
        except:
            self.br=mechanize.Browser(factory=mechanize.RobustFactory(urltags=self.urltags()))
            self.br.set_handle_robots(False)  
        return self.br  
    def urltags(self):return None
    def read(self):
        self.body=self.resp.read()  
        
       
    def all(self):
        if self.level+1>self.maxlevel:
            return
        try:
            for link in self.br.links():
                try:
                    page=self.click(link)
                    if  self.allfilter(link)==True:            
                        page.all()
                except:
                    pass
                self.child.append(page)
        except:
            pass
            
    def click(self,link):        
        return self.page_class(
                               request=self.br.click_link(link=link),
                               maxlevel=self.maxlevel,
                               level=self.level+1,
                               type=link.tag)
    def mkdir(self,path):   
        try:
            if os.path.isfile(path):
                os.remove(path)
                
            def find_not_exist(path):
                if os.path.exists(path)==False:
                    return path
                i=0
                while i<100:
                    t=('%s_(%d)')%(path,i)
                    if os.path.exists(t)==False:
                        return t
                return None 
                       
#            path=find_not_exist(path)    
        except:
            return None        
        try:
            os.makedirs(path)
            return path            
        except:
            return path
        
    def url2path(self,url):
        from urlparse import urlparse
        n = urlparse(url)
        domain=''
        port=80 
        try:            domain=n.hostname.encode('ascii','ignore')
        except:         pass
        try:            port=int(n.port)
        except:         pass
        domain_dir=n[0]+'_'+domain+'+'+str(port)
        try:
            path=n[2]
            if len(path)==0:
                path='/index'
            if os.path.isabs(path)==False:
                path='/'+path
        except:         pass
        return [domain_dir,path]
    
    
    def save(self,folder=None):
        if folder==None:
            folder=self.folder
        urlpath=self.url2path(self.url)
        if folder.find(urlpath[0])<0:
            domain_dir=os.path.join(folder,urlpath[0])
        else:
            domain_dir=folder
        domain_dir=self.mkdir(domain_dir)
        
        page_path=''.join([domain_dir,urlpath[1]])
        page_path=os.path.abspath(page_path)
        page_dir=os.path.split(page_path)[0]
        self.mkdir(page_dir)
       
        
        try:
            print self.html()
            open(page_path,'wb').write(self.html())
        except:
            pass
        
        for f in self.child:           
            f.save(domain_dir)
            
# Default save location for crawled pages: the directory of this module.
SITE_DEFAULT_ROOT=os.path.dirname(__file__)
class site(page_core):
    def __init__(self,folder=None,request=None,domain=None,url=None,level=0,maxlevel=3,type=''):
        self.set_level(level, maxlevel)
        self.request=request
        self.type=type        
        self.child=[]
        self.page_class=self.site_class()
        if folder==None:
            self.folder=SITE_DEFAULT_ROOT
        else:
            self.folder=folder
        if level==0:
            if domain!=None:
                pass
            else:
                self.url=url
        else:
            self.url=request.get_full_url()            
            self.request=request
        try:
            self.getresp()
        except BrowserError, e:
            print e
            self.BrowserError=e
            
    def site_class(self):return site
        

        
class script_crawler(site):
    """Crawler that also extracts iframe/embed/script sources as links,
    but only recurses through iframes."""

    def urltags(self):
        # Extra tags mechanize should treat as links, each mapped to the
        # attribute that carries the URL.
        tags = {}
        for tag in ('iframe', 'embed', 'script'):
            tags[tag] = 'src'
        return tags

    def site_class(self):
        # Child pages are script_crawler instances as well.
        return script_crawler

    def allfilter(self, link):
        # Recurse only through iframes; other links are fetched but
        # not followed further.
        return link.tag == 'iframe'
if __name__ == '__main__':
    import sys

    if len(sys.argv)==2:
        print sys.argv[1]
        s=script_crawler(url=sys.argv[1],maxlevel=5)
    else:
        s=script_crawler(url='http://www.sina.com.cn',maxlevel=5)
    s.all()
    s.save()
#    script_crawler(url='http://addown.mooo.com:10/images/1.htm',maxlevel=3).all()