# coding=utf-8

from MQ import Consumer,Producer

from scrapy.selector import Selector
import urllib.parse as urlparse

from threading import Timer
import time

# Seconds between full re-crawls of the root pages (see crawl_again below).
timer_interval = 600

# Host of the message-queue broker the Producer/Consumer connect to.
IP="127.0.0.1"

# Seed pages: Baidu News category index pages. Pages found via links on
# these are treated as articles; the seeds themselves are link sources.
root_url=["http://news.baidu.com/guonei",
        "http://news.baidu.com/guoji",
        "http://news.baidu.com/mil",
        "http://news.baidu.com/society",
        "http://news.baidu.com/finance",
        "http://news.baidu.com/ent",
        "http://news.baidu.com/sports",
        "http://news.baidu.com/internet",
        "http://news.baidu.com/tech",
        "http://news.baidu.com/game",
        "http://news.baidu.com/lady",
        "http://news.baidu.com/auto",
        "http://news.baidu.com/house"]


class UrlsCache:
    """In-memory set of URLs already seen, used to deduplicate the crawl.

    Each instance owns its own set. (Previously ``urls`` was a mutable
    class attribute, so every instance shared one set: ``cached()``
    mutated the class-level set while ``clean()`` rebound an instance
    attribute, leaking seen URLs between instances.)
    """

    def __init__(self):
        # Per-instance storage; never shared between instances.
        self.urls = set()

    def clean(self):
        """Forget all remembered URLs (called before each re-crawl)."""
        self.urls = set()

    def cached(self, url):
        """Return True if *url* was seen before; otherwise remember it
        and return False."""
        if url in self.urls:
            return True
        self.urls.add(url)
        return False


if __name__ == '__main__':
    article_number = 1

    # Producer for URLs still to be fetched by downstream workers.
    p = Producer(IP)
    p.declare_queue("url")

    # Producer for extracted article HTML.
    article = Producer(IP)
    article.declare_queue("article")
    cache = UrlsCache()

    def crawl_again():
        """Reset the dedup cache, re-seed the root pages, and schedule
        the next re-crawl.

        threading.Timer fires only once, so we restart it here to get a
        periodic re-crawl every ``timer_interval`` seconds (the original
        single top-level Timer re-crawled exactly once and then stopped).
        """
        print("+++Crawl Again+++")
        cache.clean()
        for url in root_url:
            cache.cached(url)
            p.send("url", url)
        t = Timer(timer_interval, crawl_again)
        t.daemon = True  # don't keep the process alive for the timer
        t.start()

    crawl_again()

    def callback(channel, method, properties, content):  # standard pika callback signature
        """Handle one message from the "content" queue.

        Message format: the fetched URL, a newline, then the page HTML.
        Root pages have their links extracted and enqueued on "url";
        everything else is forwarded to the "article" queue.
        """
        global article_number
        print(" [x] Received %r" % content[:40])

        content = str(content, encoding="utf-8")
        # str.find returns -1 when no newline exists; the original used
        # str.index, which raises ValueError instead and made the -1
        # branch below unreachable (a malformed message crashed the
        # callback before the ack).
        url_end_index = content.find("\n")

        if url_end_index == -1:
            print("Wrong url format: %r" % content)
        else:
            url = content[:url_end_index]
            html = content[url_end_index + 1:]

            if url in root_url:
                # Seed page: extract every link, resolve it against the
                # page URL, and enqueue the ones not seen yet.
                for link in Selector(text=html).xpath('//a/@href').extract():
                    link = urlparse.urljoin(url, link)
                    if cache.cached(link):
                        continue
                    p.send("url", link)
            else:
                # Non-seed page: treat as an article and forward the HTML.
                print("article %d" % article_number)
                article_number = article_number + 1
                article.send("article", html)

        # Ack only after the message was fully processed.
        channel.basic_ack(delivery_tag=method.delivery_tag)

    c = Consumer(IP)
    c.run("content", callback=callback)