#!/usr/bin/python
# -*- coding:utf8 -*-
from baike_spider import url_manager
from baike_spider import html_downloader
from baike_spider import html_parser
from baike_spider import html_outputer

class SpiderMain(object):
    """Main crawler controller.

    Wires together the four collaborators (URL manager, downloader,
    parser, outputer) and drives the crawl loop.
    """

    def __init__(self):
        # URL manager: tracks pending vs. already-crawled URLs
        self.urls = url_manager.UrlManager()
        # HTML downloader: fetches page content for a URL
        self.downloader = html_downloader.HtmlDownloader()
        # HTML parser: extracts outgoing links and page data
        self.parser = html_parser.HtmlParser()
        # Outputer: accumulates parsed data and writes the final HTML report
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url, max_pages=100):
        """Crawl pages breadth-first starting from root_url.

        Args:
            root_url: seed URL that starts the crawl.
            max_pages: stop after this many pages have been attempted
                (default 100, matching the original hard-coded limit).
        """
        count = 1

        # Seed the URL manager with the entry URL.
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                # Take the next pending URL.
                new_url = self.urls.get_new_url()
                print("craw %d : %s" % (count, new_url))
                # Download the page content.
                html_content = self.downloader.download(new_url)
                # Parse out new links and this page's data.
                new_urls, new_data = self.parser.parse(new_url, html_content)
                # Queue the newly discovered URLs.
                self.urls.add_new_urls(new_urls)
                # Collect the parsed data for final output.
                self.outputer.collect_data(new_data)
            except Exception as e:
                # One bad page (network error, parse failure) must not
                # abort the whole crawl — log it and move on.
                print("craw failed: %s" % e)

            if count >= max_pages:
                break
            count += 1

        # Write everything collected so far to the output file.
        self.outputer.output_html()

if __name__ == "__main__":
    # Seed URL for the crawl.
    entry_url = "https://baike.baidu.com/item/Python/407313.html"
    # Build the spider and kick off the crawl from the seed.
    spider = SpiderMain()
    spider.craw(entry_url)