from module import UrlManager
from module import Parser
from module import IndexParser
from module import NodeParser
from module import ContentParser
from module import TVParser
from module import ResParser
from module import Function
import os


class SpiderMain(object):
    """Crawler driver: starting from a root URL, downloads pages, routes each
    URL to the parser matching its URL pattern, and stores results under
    ``<cwd>/data/``."""

    def __init__(self):
        # Queue + dedup manager for URLs still to be crawled.
        self.urls = UrlManager.UrlManager()
        # NOTE(review): not used by craw(); kept for backward compatibility
        # with any external callers that access this attribute.
        self.parser = Parser.Parser()
        # Output directory for downloaded data (trailing separator kept,
        # matching the original "/data/" contract expected by the parsers).
        self.basedir = os.path.join(os.getcwd(), "data") + os.sep

    def craw(self, root_url):
        """Crawl starting at *root_url*.

        Parses the index page first, queues every URL it yields, then
        processes queued URLs until the queue is empty.
        """
        html = Function.downloadhtml(root_url)
        new_urls, html = IndexParser.Parser().paser(root_url, html, self.basedir)
        ResParser.Parser().paser(root_url, html, self.basedir)

        # Guard against a None result: the original unguarded iteration
        # would raise TypeError, while the branch parsers below all check.
        if new_urls is not None:
            for url in new_urls:
                self.urls.add_new_url(url)

        while self.urls.has_new_url():
            new_url = self.urls.get_new_url()
            print('下载网页开始：' + new_url)
            self._process_url(new_url)
            print('下载网页结束：' + new_url)

    def _process_url(self, new_url):
        """Download *new_url* and run every parser whose URL marker matches,
        queueing any newly discovered URLs.

        A URL matching several markers is processed by each matching parser
        in order, mirroring the original sequential ``if`` checks.
        """
        # (substring marker, parser module) dispatch table.
        dispatch = (
            ('node_', NodeParser),
            ('content_', ContentParser),
            ('tv.81.cn', TVParser),
        )
        for marker, parser_module in dispatch:
            if marker in new_url:
                html = Function.downloadhtml(new_url)
                new_urls, html = parser_module.Parser().paser(new_url, html, self.basedir)
                ResParser.Parser().paser(new_url, html, self.basedir)
                if new_urls is not None:
                    self.urls.add_new_urls(new_urls)

if __name__ == '__main__':
    # Script entry point: crawl outward from the hard-coded root article.
    ROOT_URL = "http://www.81.cn/xue-xi/102726.htm"
    SpiderMain().craw(ROOT_URL)
