#!/usr/bin/python
#coding=utf8

#prefix="http://neuronr.javaeye.com%s"
# Root URL of the javaeye blog to crawl (no trailing slash); the subdomain
# ("crackcell") becomes the local output folder name.
url="http://crackcell.javaeye.com"
class BlogCatcher(object):
    """Crawl a javaeye.com blog: collect the relative URL of every post from
    the paginated index, then download each post's HTML into a local folder
    named after the blog's subdomain (e.g. ./crackcell/424835.html)."""

    def __init__(self, url=''):
        # The original file defined __init__ twice; Python silently kept only
        # the second one, making the zero-argument form dead code.  A default
        # value for ``url`` supports both call styles with a single __init__.
        self.url = url
        self.bloglist = []   # relative post paths, e.g. 'blog/424835'
        self.filename = ''   # output folder, derived from the subdomain
        self.compileRe()

    def compileRe(self):
        """Pre-compile every regular expression used by the crawler."""
        import re
        # href of the "next page" link in the pagination bar
        self.re_nextpage=re.compile(r'rel="next">.*</a>\s*<a href="/(?P<name>.*)(?=" class="next_page".*)')
        # relative URL of each post listed on an index page
        self.re_blogpage=re.compile(r"<h3 class='.*' title='.*'*<a href='/(.*)'>.*")
        # subdomain of the blog URL -> local output folder name
        self.re_foldername=re.compile(r'^http://(.*?)\..*')
        # numeric post id -> local output file name
        self.re_filename=re.compile(r'blog/(\d*)')

    def makeRealPage(self):
        """Download every post in ``self.bloglist`` and write each one to
        ./<subdomain>/<post-id>.html (the folder is created if missing)."""
        self.filename = self.re_foldername.search(self.url).group(1)
        import os
        try:
            os.mkdir(self.filename)
        except OSError:
            # folder already exists (or cannot be created) -- best effort,
            # narrowed from the original bare except
            pass
        url = self.url
        if url.endswith('/'):
            url = url + '%s'
        else:
            url = url + '/%s'
        # Plain loop instead of map(): map is for building lists, not for
        # side effects (and returns a lazy iterator on Python 3).
        for blog in self.bloglist:
            page = self.getHtmlFromUrl(url % blog)
            path = './%s/%s.html' % (self.filename,
                                     self.re_filename.search(blog).group(1))
            with open(path, 'w') as f:
                f.write(page)

    def getBlogListFromOnePage(self, page):
        """Return the relative URLs of all posts found on one index page."""
        return self.re_blogpage.findall(page)

    def getAllBlogList(self):
        """Walk the paginated index, accumulating post URLs in self.bloglist."""
        url = self.url
        if url.endswith('/'):
            url = url + '%s'
        else:
            url = url + '/%s'
        suffix = ''   # '' fetches the first index page
        while suffix is not None:
            page = self.getHtmlFromUrl(url % suffix)
            self.bloglist += self.getBlogListFromOnePage(page)
            href = self.re_nextpage.search(page)
            if href:
                suffix = href.group(1)   # relative URL of the next index page
            else:
                suffix = None            # no "next" link: last page reached

    def getHtmlFromUrl(self, url):
        """Fetch *url* and return the response body as a string, or None when
        *url* is falsy.  Sends a desktop-browser User-Agent (presumably the
        site rejects the default urllib2 agent -- TODO confirm)."""
        if not url:
            return None
        import urllib2
        req = urllib2.Request(url)
        req.add_header('User-Agent','Mozilla/5.0 (X11; U; Linux i686; zh-CN; rv:1.9.0.1) Gecko/2008072820 Ubuntu/8.04 (hardy) Firefox/3.0.3 GTB5')
        page = urllib2.urlopen(req)
        try:
            return page.read()
        finally:
            # close the response even if read() raises
            page.close()

    def run(self):
        """Collect the full post list, then download every post."""
        self.getAllBlogList()
        self.makeRealPage()

if __name__ == '__main__':
    # Run the crawler only when executed as a script, so importing this
    # module for its BlogCatcher class triggers no network traffic.
    catcher = BlogCatcher(url)
    catcher.run()
