#!/usr/bin/env python
# -*- coding: utf-8 -*-
from jdlingyu import html_downloader
from jdlingyu import html_outputer
from jdlingyu import html_parser
from jdlingyu import url_manager


class JdlingyuMain(object):
    """Spider driver: crawls listing pages from jdlingyu, collects image data,
    and writes the results out as an HTML report.
    """

    def __init__(self):
        # URL manager: produces the listing-page URLs to crawl
        self.urls = url_manager.UrlManager()
        # Page downloader: fetches raw HTML for a URL
        self.downloader = html_downloader.HtmlDownloader()
        # Page parser ('HtmlParder' is the class name as declared in the
        # html_parser module -- presumably a typo there, kept for compatibility)
        self.parser = html_parser.HtmlParder()
        # Output writer: accumulates parsed records and renders the HTML report
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url, max_pages=5):
        """Crawl up to *max_pages* listing pages starting from *root_url*.

        For each listing page, every image link found on it is downloaded and
        parsed, the parsed record is collected, and finally one HTML report is
        written via the outputer.

        :param root_url: site front-page URL to start from
        :param max_pages: maximum number of listing pages to crawl
                          (default 5, matching the original hard-coded limit)
        """
        page_urls = self.urls.get_page_urls(root_url, max_pages)
        # enumerate(..., 1) replaces the manual count = 1 / count += 1 bookkeeping
        for count, page in enumerate(page_urls, 1):
            # print() with one pre-formatted argument behaves identically on
            # Python 2 (as a parenthesized expression) and Python 3
            print("craw %d : %s" % (count, page))
            page_html = self.downloader.downloaderHtml(page)
            imgs_link = self.parser.parser_imgs_link(page_html)
            for i, img_url in enumerate(imgs_link):
                img_html = self.downloader.downloaderHtml(img_url)
                # img_src appears to be a dict with at least a 'title' key --
                # TODO confirm against html_parser.parser_img
                img_src = self.parser.parser_img(img_html)
                print("imgs %d: %s" % (i, img_src['title']))
                self.outputer.collect_data(img_src)
        self.outputer.output_html()
if __name__ == '__main__':
    # Script entry point: crawl the site starting from its front page.
    spider = JdlingyuMain()
    spider.craw('http://www.jdlingyu.moe/')