from xinlang import downloader
from xinlang import outputer
from xinlang import parser
from xinlang import url_manager


class SpiderMain:
    """Coordinate the crawl: URL queue, downloader, parser, and outputer.

    The collaborators come from the ``xinlang`` package imported at the
    top of this module.
    """

    def __init__(self):
        self.urls = url_manager.UrlManager()          # frontier of URLs to visit
        self.downloader = downloader.HtmlDownloader() # fetches raw HTML
        self.parser = parser.HtmlParser()             # extracts links / data
        self.outputer = outputer.HtmlOutputer()       # accumulates and writes report

    def craw(self, rootUrl):
        """Crawl starting from *rootUrl*.

        Downloads the root page, harvests candidate URLs from it, then
        visits them one at a time — stopping once the counter reaches 10
        (i.e. at most 9 pages are collected, matching the original loop
        bound) — and finally writes the collected data out as HTML.
        """
        count = 1
        html_cont = self.downloader.download(rootUrl)
        urls = self.parser.parseToGetUrl(html_cont)
        if not urls:
            # Bug fix: the original printed a message when the parser
            # returned None but then iterated `urls` anyway, raising
            # TypeError. Bail out early instead.
            print("urls is None")
            return
        self.urls.addNewUrls(urls)
        while self.urls.hasNewUrl():
            # Fetch the next URL outside the try so a broken queue does
            # not loop forever printing failures.
            new_url = self.urls.getNewUrl()
            try:
                print('craw %d:%s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_data = self.parser.parse(new_url, html_cont)
                self.outputer.collectData(new_data)
                count += 1
                if count == 10:
                    break
            except Exception as exc:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); report the failing URL
                # so failures are diagnosable.
                print('craw failed for %s: %s' % (new_url, exc))
        self.outputer.outPutHtml()

if __name__ == "__main__":
    # Entry point: crawl the Sina books portal.
    start_url = "http://book.sina.com.cn/"
    spider = SpiderMain()
    spider.craw(start_url)