# -*- coding:utf-8 -*-
import url_manager, html_downloader, html_parser, html_outputer, image_manager


class SpiderMain:
    """Crawl coordinator: fetches pages, harvests links and images,
    and downloads every image discovered during the crawl."""

    def __init__(self):
        # Collaborators: URL frontier, page/image fetcher, HTML parser,
        # result writer, and image frontier.
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
        self.images = image_manager.ImageManager()

    def get_info(self, url):
        """Download *url* and feed its discovered links and image URLs
        into the URL and image managers respectively."""
        html_cont = self.downloader.download(url)
        result_links = self.parser.get_all_path(html_cont)
        self.urls.add_new_urls(result_links)
        result_images = self.parser.get_all_image(html_cont)
        self.images.add_new_images(result_images)

    def craw(self, root):
        """Crawl starting from *root*, downloading every image found.

        BUG FIX: the original drained the image queue only once, before
        visiting the queued URLs, so images discovered on any page other
        than *root* were enqueued but never downloaded.  The image queue
        is now drained after every page visit.
        """
        self.get_info(root)
        while True:
            # Download all images queued so far, including those just
            # added by the most recent page visit.
            while self.images.has_new_image():
                self.downloader.download_pic(self.images.get_new_image())
            if not self.urls.has_new_url():
                break
            self.get_info(self.urls.get_new_url())
        print(self.urls.get_index())
        print(self.images.get_count())

if __name__ == '__main__':
    # Entry point: crawl the site starting from its home page.
    start_url = "http://www.daweijita.com"
    SpiderMain().craw(start_url)
