from CxWoNiuBiJi.CxURLManager import CxURLManager
from CxWoNiuBiJi.CxHTMLParser import CxHTMLParser
from CxWoNiuBiJi.CxHTMLDownloader import CxHTMLDownloader
from CxWoNiuBiJi.CxDataOutput import CxDataOutput
import time


class CxSpiderMain(object):
    """Crawl scheduler: wires the URL manager, downloader, parser and
    data output together to fetch note pages and persist scraped data."""

    def __init__(self):
        # Collaborators: URL queue, HTML parser, page fetcher, CSV writer.
        self.manager = CxURLManager()
        self.parser = CxHTMLParser()
        self.downloader = CxHTMLDownloader()
        self.output = CxDataOutput()

    def Cxcrawl(self, root_url, max_count=100):
        """Crawl pages starting from *root_url* until the URL queue is
        exhausted or *max_count* pages have been processed, then flush
        the collected data to CSV.

        :param root_url: seed URL added to the URL manager
        :param max_count: upper bound on pages to crawl; defaults to 100,
            preserving the previously hard-coded limit
        """
        count = 1
        self.manager.Cx_add_new_url(root_url)
        while self.manager.Cx_has_new_url():
            try:
                # Take one pending URL out of the URL manager.
                new_url = self.manager.Cx_get_new_url()
                print("次数：" + str(count) + " 正在爬取：" + new_url)
                # Download the page content.
                html = self.downloader.Cxdownload(url=new_url)
                # Parse the page: the parser returns either a data dict
                # (scraped record) or a collection of newly found URLs.
                new_urlsORnew_data = self.parser.Cxparser(page_url=new_url, html_content=html)
                if isinstance(new_urlsORnew_data, dict):
                    # A dict is scraped data — buffer it for CSV output.
                    self.output.Cx_save_csv(data=new_urlsORnew_data)
                else:
                    # Otherwise it is a batch of URLs still to be crawled.
                    self.manager.Cx_add_new_urls(urls=new_urlsORnew_data)
                # Stop once the page limit is reached.
                if count == max_count:
                    break
                count += 1
                time.sleep(1)  # throttle requests to be polite to the server
            except Exception as e:
                # Previously a bare `except:` that silently swallowed every
                # exception (including KeyboardInterrupt) with no detail.
                # Narrowed to Exception and the cause is now surfaced.
                print("调度器发生错误！！！！------------------", e)
        # Flush all buffered records to the CSV file.
        self.output.Cx_write_csv()


if __name__ == '__main__':
    # Script entry point: start crawling from the first notes page.
    CxSpiderMain().Cxcrawl('http://www.woniuxy.com/note/page-1')
