"""
爬虫调度器 --- 1.初始化各个模块
               2.crawl(root_url)方法传入入口url
               3.该方法内部实现按照运行流程控制各个模块的工作
"""
from architecture_crawl.HtmlDownloader import HtmlDownloader
from architecture_crawl.HtmlParser import HtmlParser
from architecture_crawl.UrlManager import UrlManager
from architecture_crawl.DataOutput import DataOutput

class SpiderMan(object):
    """Crawler scheduler: wires together the URL manager, downloader,
    parser and data writer, and drives the crawl loop."""

    def __init__(self):
        # One instance of each pipeline component.
        self.htmlDownloader = HtmlDownloader()
        self.htmlParser = HtmlParser()
        self.urlManager = UrlManager()
        self.dataOutput = DataOutput()

    def crawl(self, root_url):
        """Crawl starting from root_url until no new URLs remain or
        10 URLs have been processed, then write the collected data.

        :param root_url: entry-point URL seeding the URL manager.
        """
        self.urlManager.add_new_url(root_url)
        # Stop once the frontier is empty or 10 pages have been crawled.
        while self.urlManager.has_new_url() and self.urlManager.old_url_size() < 10:
            try:
                new_url = self.urlManager.get_new_url()
                html = self.htmlDownloader.download(new_url)
                # Parser returns (new outgoing urls, extracted page data).
                urls, data = self.htmlParser.parser(new_url, html)
                self.urlManager.add_new_urls(urls)
                self.dataOutput.store_data(data)
                print("已经爬了%s链接" % self.urlManager.old_url_size())
            except Exception as e:
                # Fix: report the actual error instead of discarding it.
                print("crawl failed: %s" % e)
        # Fix: write the output file once after the loop, not on every
        # iteration (the original re-wrote it per crawled page).
        self.dataOutput.output_html()
if __name__ in "__main__":
    spiderman = SpiderMan()
    spiderman.crawl("https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB%E7%A8%8B%E5%BA%8F/449844?fr=aladdin")

