#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)

from JDLingYu import html_outputer

from JDLingYu import html_parser

from JDLingYu import html_downloader

from JDLingYu import url_manager

import threading

class JdlingyuMain(object):
    def __init__(self):
        #URL管理器
        self.urls = url_manager.UrlManager(3)
        #网页下载器
        self.downloader = html_downloader.HtmlDownloader()
        #网页解析器
        self.parser = html_parser.HtmlParder()
        #网页输出器
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self):
        # page_urls = self.urls.get_page_urls(root_url,10)
        # count = 1
        # for page in page_urls:
        #     print "craw %d : %s" % (count, page)
        #     page_count = self.downloader.downloaderHtml(page)
        #     imgs_link = self.parser.parser_imgs_link(page_count)
        #     for i,img_url in enumerate(imgs_link):
        #         imgs_count = self.downloader.downloaderHtml(img_url)
        #         img_src = self.parser.parser_img(imgs_count)
        #         print "imgs %d: %s" % (i,img_src['title'])
        #         self.downloader.downImg(img_src['imgs'],img_src['title'])
        #         self.outputer.collect_data(img_src)
        #     count+=1
        # self.outputer.output_html()
        while self.urls.has_new_page_url():
            page_url = self.urls.get_page_urls()
            print page_url,'page页面解析中'
            page_count = self.downloader.downloaderHtml(page_url)
            pics_link = self.parser.parser_pics_link(page_count)
            for pic_page in pics_link:
                self.urls.add_pics_url(pic_page)

        while self.urls.han_new_pics_url():
            pic_url = self.urls.get_pics_urls()
            pic_count = self.downloader.downloaderHtml(pic_url)
            img_src = self.parser.parser_img(pic_count)
            print "imgs : %s" % (img_src['title'])
            print 'url:',img_src['imgs']
            self.downloader.downImg(img_src['imgs'], img_src['title'])

if __name__ == '__main__':
    # Single-threaded crawl; the URL manager inside JdlingyuMain is
    # pre-seeded, so no root URL needs to be passed here.
    jd_spider = JdlingyuMain()
    jd_spider.craw()