from reptile.blog_Spider import urlManager, htmlDownloader, htmlParser, htmlOutputer

class SpiderMain(object):
    """Coordinates the crawl: URL bookkeeping, download, parse, output.

    Wires together the four collaborators from ``reptile.blog_Spider``:
    a URL manager (dedup/queue), an HTML downloader, a parser, and an
    HTML outputer that accumulates data and writes the final report.
    """

    def __init__(self):
        self.urls = urlManager.UrlManager()
        self.downloader = htmlDownloader.HtmlDownloader()
        self.parser = htmlParser.HtmlParser()
        self.outPuter = htmlOutputer.HtmlOutputer()

    def craw(self, rootUrl):
        """Crawl breadth-first starting from *rootUrl*.

        Keeps pulling unvisited URLs from the manager until none remain,
        feeding newly discovered links back in, then writes the collected
        data out as HTML.

        :param rootUrl: the seed URL to start crawling from.
        """
        count = 1
        self.urls.addNewUrl(rootUrl)
        while self.urls.hasNewUrl():
            # Catch only Exception so Ctrl-C (KeyboardInterrupt) still
            # stops the crawl; a bare `except:` would swallow it.
            try:
                newUrl = self.urls.getNewUrl()
                print('craw %d : %s' % (count, newUrl))
                htmlCont = self.downloader.download(newUrl)
                newUrls, newData = self.parser.parse(newUrl, htmlCont)
                self.urls.addNewUrls(newUrls)
                self.outPuter.collectData(newData)
                # Only successful pages advance the counter.
                count += 1
            except Exception as e:
                # Report which URL failed and why instead of a silent
                # generic message, so failures are diagnosable.
                print('craw failed: %s (%s)' % (newUrl, e))
        self.outPuter.outputHtml()


if __name__ == "__main__":
    # Entry point: crawl the configured CSDN blog from its root page.
    startUrl = "https://blog.csdn.net/weixin_41475710"
    SpiderMain().craw(startUrl)