#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import time
import config
import test
from contrib.spider_http import *
from contrib.spider_html import *
from contrib.spider_frontier import *
from contrib.dosql import *
reload(sys)
sys.setdefaultencoding('utf-8')    # @UndefinedVariable
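
# Pipeline overview: Schedule pops URLs off the WorkQueue frontier and hands
# each one to a Crawler, which downloads the page, extracts question/answer
# items into the Nest (the SQLite-backed sink set up in Schedule.__init__),
# and feeds newly discovered links back into the frontier.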

class Crawler(object):
    """Fetches one page, extracts Q&A items into the nest, and feeds
    newly discovered links back into the shared frontier."""
    def __init__(self, nest, link_queue):
        self.nest = nest
        self.link_queue = link_queue

    def receive(self, url):
        """Accept the next URL to crawl."""
        self.url = url
    def run(self):
        downloader = Downloader()
        data = downloader.downLoadFile(self.url)
        if data is None:
            return
        htmlparsertool = test.MyHtmlTool(data)
        urls = htmlparsertool.extractUrlsFromData(self.url)
        if config.urlFilter(downloader.url):
            questions = htmlparsertool.extractItems(pipeline=config.pickQuestion)
            # extractItems yields items lazily; take the first question, if any,
            # instead of calling .next() and risking an unhandled StopIteration.
            question = next(questions, None) if questions is not None else None
            if question is not None:
                answers = htmlparsertool.extractItems(pipeline=config.pickAnswer, extra=question)
                # Store the answers only if the nest accepted the question.
                if self.nest.put(('question', question)):
                    for answer in answers:
                        self.nest.put(('answer', answer))
        # Enqueue every extracted link that passes the URL filter.
        for url in filter(config.urlFilter, urls):
            self.link_queue.add(url)
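
# The config module must provide urlFilter plus the pickQuestion/pickAnswer
# extraction pipelines used above. Their real definitions live in config.py;
# the body below is only a hypothetical sketch of what urlFilter might look
# like, inferred from how it is called here:
#
#     def urlFilter(url):
#         # Hypothetical: keep only Zhihu question pages.
#         return 'zhihu.com/question' in url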

class Schedule(object):
    """Drives the crawl: owns the URL frontier and the storage nest."""
    def __init__(self):
        self.link_queue = WorkQueue()
        self.nest = Nest('nest', 'crawler.db')
        self.nest.initFromSql('answer.sql')
        self.count = 0

    def initCrawlerWithSeeds(self, seeds):
        """Seed the frontier with the start URLs."""
        for url in seeds:
            self.link_queue.add(url)
    
    def scheduling(self):
        spider = Crawler(self.nest, self.link_queue)
        self.nest.start()
        # Crawl single-threaded until the frontier is exhausted,
        # sleeping between requests to throttle the crawl.
        while not self.link_queue.empty():
            print self.count
            self.count += 1
            url = self.link_queue.get()
            spider.receive(url)
            spider.run()
            time.sleep(1)
    
if __name__ == '__main__':
    start_urls = ['http://www.zhihu.com/question/20821374']
    sc = Schedule()
    sc.initCrawlerWithSeeds(start_urls)
    sc.scheduling()